diff --git a/CODEOWNERS b/CODEOWNERS index b3a89aecb5c1bf1907db23aabcb7d61adecc8ab3..638941c1a2a1765ad2a55dd81cf65ca72dce37be 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -338,7 +338,7 @@ zh-cn/application-dev/reference/apis/js-apis-distributedMissionManager.md @chenm zh-cn/application-dev/reference/apis/js-apis-document.md @panqinxu @zengyawen @bubble_mao @jinhaihw zh-cn/application-dev/reference/apis/js-apis-effectKit.md @zhangqiang183 @ge-yafang @wind_zj @zxg-gitee zh-cn/application-dev/reference/apis/js-apis-emitter.md @jayleehw @RayShih @li-weifeng2 @currydavids -zh-cn/application-dev/reference/apis/js-apis-EnterpriseAdminExtensionAbility.md @Buda-Liu @ningningW @budda-wang @yangqing3 +zh-cn/application-dev/reference/apis/js-apis-EnterpriseAdminExtensionAbility.md @liuzuming @ningningW @yangqing3 zh-cn/application-dev/reference/apis/js-apis-environment.md @panqinxu @zengyawen @bubble_mao @jinhaihw zh-cn/application-dev/reference/apis/js-apis-errorManager.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen zh-cn/application-dev/reference/apis/js-apis-eventhub.md @jayleehw @RayShih @li-weifeng2 @currydavids @@ -384,7 +384,7 @@ zh-cn/application-dev/reference/apis/js-apis-lightweightmap.md @gongjunsong @ge- zh-cn/application-dev/reference/apis/js-apis-lightweightset.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-linkedlist.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-list.md @gongjunsong @ge-yafang @flyingwolf @BlackStone -zh-cn/application-dev/reference/apis/js-apis-logs.md @huaweimaxuchu @ningningW @niulihua @tomatodevboy +zh-cn/application-dev/reference/apis/js-apis-logs.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-media.md @liuyuehua1 @zengyawen @xxb-wzy @currydavids zh-cn/application-dev/reference/apis/js-apis-medialibrary.md @panqinxu @zengyawen @bubble_mao @jinhaihw 
zh-cn/application-dev/reference/apis/js-apis-mediaquery.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy @@ -397,7 +397,7 @@ zh-cn/application-dev/reference/apis/js-apis-notification.md @jayleehw @RayShih zh-cn/application-dev/reference/apis/js-apis-observer.md @zhang-hai-feng @zengyawen @jyh926 @gaoxi785 zh-cn/application-dev/reference/apis/js-apis-osAccount.md @nianCode @zengyawen @JiDong-CS @murphy1984 zh-cn/application-dev/reference/apis/js-apis-particleAbility.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen -zh-cn/application-dev/reference/apis/js-apis-pasteboard.md @feng-aiwen @ge-yafang @gong-a-shi @logic42 +zh-cn/application-dev/reference/apis/js-apis-pasteboard.md @han-zhengshi @ge-yafang @logic42 zh-cn/application-dev/reference/apis/js-apis-permissionrequestresult.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen zh-cn/application-dev/reference/apis/js-apis-plainarray.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-pointer.md @yuanxinying @ningningW @cococoler @alien0208 @@ -415,7 +415,7 @@ zh-cn/application-dev/reference/apis/js-apis-resource-manager.md @Buda-Liu @ning zh-cn/application-dev/reference/apis/js-apis-router.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy zh-cn/application-dev/reference/apis/js-apis-rpc.md @xuepianpian @RayShih @zhaopeng_gitee @vagrant_world zh-cn/application-dev/reference/apis/js-apis-runninglock.md @aqxyjay @zengyawen @aqxyjay @alien0208 -zh-cn/application-dev/reference/apis/js-apis-screen-lock.md @feng-aiwen @ningningW @wangzhangjun @murphy1984 + zh-cn/application-dev/reference/apis/js-apis-screen.md @zhangqiang183 @ge-yafang @zhouyaoying @zxg-gitee zh-cn/application-dev/reference/apis/js-apis-screenshot.md @zhangqiang183 @ge-yafang @zhouyaoying @zxg-gitee zh-cn/application-dev/reference/apis/js-apis-securityLabel.md @panqinxu @zengyawen @bubble_mao @jinhaihw @@ -456,7 +456,7 @@ zh-cn/application-dev/reference/apis/js-apis-system-vibrate.md @hellohyh001 
@nin zh-cn/application-dev/reference/apis/js-apis-telephony-data.md @zhang-hai-feng @zengyawen @jyh926 @gaoxi785 zh-cn/application-dev/reference/apis/js-apis-testRunner.md @inter515 @littlejerry1 @RayShih @inter515 @jiyong zh-cn/application-dev/reference/apis/js-apis-thermal.md @aqxyjay @zengyawen @aqxyjay @alien0208 -zh-cn/application-dev/reference/apis/js-apis-timer.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy +zh-cn/application-dev/reference/apis/js-apis-timer.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-touchevent.md @mayunteng_1 @ningningW @cococoler @alien0208 zh-cn/application-dev/reference/apis/js-apis-treemap.md @gongjunsong @ge-yafang @flyingwolf @BlackStone zh-cn/application-dev/reference/apis/js-apis-treeset.md @gongjunsong @ge-yafang @flyingwolf @BlackStone @@ -531,6 +531,7 @@ zh-cn/application-dev/reference/apis/js-apis-distributedBundle.md @shuaytao @Ray zh-cn/application-dev/reference/apis/js-apis-distributedKVStore.md @feng-aiwen @ge-yafang @gong-a-shi @logic42 zh-cn/application-dev/reference/apis/js-apis-enterprise-accountManager.md @liuzuming @ningningW @yangqing3 zh-cn/application-dev/reference/apis/js-apis-enterprise-adminManager.md @liuzuming @ningningW @yangqing3 +zh-cn/application-dev/reference/apis/js-apis-enterprise-bundleManager.md @liuzuming @ningningW @yangqing3 zh-cn/application-dev/reference/apis/js-apis-enterprise-dateTimeManager.md @liuzuming @ningningW @yangqing3 zh-cn/application-dev/reference/apis/js-apis-enterprise-deviceControl.md @liuzuming @ningningW @yangqing3 zh-cn/application-dev/reference/apis/js-apis-enterprise-deviceInfo.md @liuzuming @ningningW @yangqing3 diff --git a/README.md b/README.md index 9351f7f58964229343c0dbfc3c9875e212a4a54f..a9f000cbfb9135b87307c440d21779f2fc2f07e2 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ This repository stores device and application development documents provided by - master: the latest version. 
- - OpenHarmony 3.2 Beta5. [Learn more](en/release-notes/OpenHarmony-v3.2-beta5.md) + - OpenHarmony 3.2 Release. [Learn more](en/release-notes/OpenHarmony-v3.2-release.md) - OpenHarmony 3.1 Release. [Learn more](en/release-notes/OpenHarmony-v3.1-release.md) diff --git a/README_zh.md b/README_zh.md index 6e71a0abd9fba5cbe4b4de47c77437d3a851aa45..b69235fdbdc19771dbae119f8873b610a7adf65c 100644 --- a/README_zh.md +++ b/README_zh.md @@ -18,7 +18,7 @@ - master:最新开发版本。 - - OpenHarmony 3.2 Beta5版本:点击[此处](zh-cn/release-notes/OpenHarmony-v3.2-beta5.md)了解版本详情。 + - OpenHarmony 3.2 Release版本:点击[此处](zh-cn/release-notes/OpenHarmony-v3.2-release.md)了解版本详情。 - OpenHarmony 3.1 Release版本:点击[此处](zh-cn/release-notes/OpenHarmony-v3.1-release.md)了解版本详情。 diff --git a/en/OpenHarmony-Overview.md b/en/OpenHarmony-Overview.md index a8dbb1b636a19fe6a2b7e6dee741b56dac0e6289..1aecb0171e73b02bcbcb9347497f594bf96c912e 100644 --- a/en/OpenHarmony-Overview.md +++ b/en/OpenHarmony-Overview.md @@ -189,7 +189,7 @@ For details about how to obtain the source code of OpenHarmony, see [Source Code ## How to Participate -For details about how to join in the OpenHarmony community, see [OpenHarmony Community](https://gitee.com/openharmony/community/blob/master/README-EN.md) +For details about how to join in the OpenHarmony community, see [OpenHarmony Community](https://gitee.com/openharmony/community/blob/master/README_EN.md) For details about how to contribute, see [How to contribute](contribute/how-to-contribute.md). diff --git a/en/application-dev/IDL/idl-guidelines.md b/en/application-dev/IDL/idl-guidelines.md index f165215bad4d663b794c249f8029d33aeeda5863..a7ce0ec46adeeca0dd697cd8dabde834b7cc14fc 100644 --- a/en/application-dev/IDL/idl-guidelines.md +++ b/en/application-dev/IDL/idl-guidelines.md @@ -3,7 +3,7 @@ ## IDL Overview To ensure successful communications between the client and server, interfaces recognized by both parties must be defined. 
The OpenHarmony Interface Definition Language (IDL) is a tool for defining such interfaces. OpenHarmony IDL decomposes objects to be transferred into primitives that can be understood by the operating system and encapsulates cross-boundary objects based on developers' requirements. - **Figure 1** IDL interface description +**Figure 1** IDL interface description ![IDL-interface-description](./figures/IDL-interface-description.png) @@ -156,11 +156,13 @@ On DevEco Studio, choose **Tools > SDK Manager** to view the local installation Go to the local installation path, choose **toolchains > 3.x.x.x** (the folder named after the version number), and check whether the executable file of IDL exists. -> **NOTE**: Use the SDK of the latest version. The use of an earlier version may cause errors in some statements. +> **NOTE** +> +> Use the SDK of the latest version. The use of an earlier version may cause errors in some statements. -If the executable file does not exist, download the SDK package from the mirror as instructed in the [Release Notes](../../release-notes). The following uses the [3.2 Beta3](../../release-notes/OpenHarmony-v3.2-beta3.md#acquiring-source-code-from-mirrors) as an example. +If the executable file does not exist, download the SDK package from the mirror as instructed in the [Release Notes](../../release-notes). The following uses [3.2 Beta3](../../release-notes/OpenHarmony-v3.2-beta3.md) as an example. -For details about how to replace the SDK package, see [Guide to Switching to Full SDK](../quick-start/full-sdk-switch-guide.md). +For details about how to replace the SDK package, see [Full SDK Compilation Guide](../quick-start/full-sdk-compile-guide.md). After obtaining the executable file, perform subsequent development steps based on your scenario. @@ -176,6 +178,8 @@ You can use TS to create IDL files. 
interface OHOS.IIdlTestService { int TestIntTransaction([in] int data); void TestStringTransaction([in] String data); + void TestMapTransaction([in] Map data); + int TestArrayTransaction([in] String[] data); } ``` @@ -183,7 +187,9 @@ Run the **idl -gen-ts -d *dir* -c dir/IIdlTestService.idl** command in the folde -*dir* next to **d** is the target output folder. For example, if the target output folder is **IIdlTestServiceTs**, run the **idl -gen-ts -d IIdlTestServiceTs -c IIdlTestServiceTs/IIdlTestService.idl** command in the folder where the executable file is located. The interface file, stub file, and proxy file are generated in the *dir* directory (**IIdlTestServiceTs** directory in this example) in the execution environment. -> **NOTE**: The generated interface class file name must be the same as that of the .idl file. Otherwise, an error occurs during code generation. +> **NOTE** +> +> The generated interface class file name must be the same as that of the .idl file. Otherwise, an error occurs during code generation. 
For example, for an .idl file named **IIdlTestService.idl** and target output directory named **IIdlTestServiceTs**, the directory structure is similar to the following: @@ -203,6 +209,8 @@ The stub class generated by IDL is an abstract implementation of the interface c ```ts import {testIntTransactionCallback} from "./i_idl_test_service"; import {testStringTransactionCallback} from "./i_idl_test_service"; +import {testMapTransactionCallback} from "./i_idl_test_service"; +import {testArrayTransactionCallback} from "./i_idl_test_service"; import IIdlTestService from "./i_idl_test_service"; import rpc from "@ohos.rpc"; @@ -211,8 +219,8 @@ export default class IdlTestServiceStub extends rpc.RemoteObject implements IIdl super(des); } - async onRemoteRequestEx(code: number, data, reply, option): Promise { - console.log("onRemoteRequestEx called, code = " + code); + async onRemoteMessageRequest(code: number, data, reply, option): Promise { + console.log("onRemoteMessageRequest called, code = " + code); switch(code) { case IdlTestServiceStub.COMMAND_TEST_INT_TRANSACTION: { let _data = data.readInt(); @@ -231,6 +239,29 @@ export default class IdlTestServiceStub extends rpc.RemoteObject implements IIdl }); return true; } + case IdlTestServiceStub.COMMAND_TEST_MAP_TRANSACTION: { + let _data = new Map(); + let _dataSize = data.readInt(); + for (let i = 0; i < _dataSize; ++i) { + let key = data.readInt(); + let value = data.readInt(); + _data.set(key, value); + } + this.testMapTransaction(_data, (errCode) => { + reply.writeInt(errCode); + }); + return true; + } + case IdlTestServiceStub.COMMAND_TEST_ARRAY_TRANSACTION: { + let _data = data.readStringArray(); + this.testArrayTransaction(_data, (errCode, returnValue) => { + reply.writeInt(errCode); + if (errCode == 0) { + reply.writeInt(returnValue); + } + }); + return true; + } default: { console.log("invalid request code" + code); break; @@ -241,17 +272,23 @@ export default class IdlTestServiceStub extends rpc.RemoteObject 
implements IIdl testIntTransaction(data: number, callback: testIntTransactionCallback): void{} testStringTransaction(data: string, callback: testStringTransactionCallback): void{} + testMapTransaction(data: Map, callback: testMapTransactionCallback): void{} + testArrayTransaction(data: string[], callback: testArrayTransactionCallback): void{} static readonly COMMAND_TEST_INT_TRANSACTION = 1; static readonly COMMAND_TEST_STRING_TRANSACTION = 2; + static readonly COMMAND_TEST_MAP_TRANSACTION = 3; + static readonly COMMAND_TEST_ARRAY_TRANSACTION = 4; } ``` -You need to inherit the interface class defined in the IDL file and implement the methods in the class. The following code snippet shows how to inherit the **IdlTestServiceStub** interface class and implement the **testIntTransaction** and **testStringTransaction** methods. +You need to inherit the interface class defined in the IDL file and implement the methods in the class. The following code snippet shows how to inherit the **IdlTestServiceStub** interface class and implement the **testIntTransaction**, **testStringTransaction**, **testMapTransaction**, and **testArrayTransaction** methods. 
```ts import {testIntTransactionCallback} from "./i_idl_test_service" import {testStringTransactionCallback} from "./i_idl_test_service" +import {testMapTransactionCallback} from "./i_idl_test_service"; +import {testArrayTransactionCallback} from "./i_idl_test_service"; import IdlTestServiceStub from "./idl_test_service_stub" @@ -265,6 +302,14 @@ class IdlTestImp extends IdlTestServiceStub { { callback(0); } + testMapTransaction(data: Map, callback: testMapTransactionCallback): void + { + callback(0); + } + testArrayTransaction(data: string[], callback: testArrayTransactionCallback): void + { + callback(0, 1); + } } ``` @@ -320,11 +365,28 @@ function callbackTestStringTransaction(result: number): void { } } +function callbackTestMapTransaction(result: number): void { + if (result == 0) { + console.log('case 3 success'); + } +} + +function callbackTestArrayTransaction(result: number, ret: number): void { + if (result == 0 && ret == 124) { + console.log('case 4 success'); + } +} + var onAbilityConnectDone = { onConnect:function (elementName, proxy) { let testProxy = new IdlTestServiceProxy(proxy); + let testMap = new Map(); + testMap.set(1, 1); + testMap.set(1, 2); testProxy.testIntTransaction(123, callbackTestIntTransaction); testProxy.testStringTransaction('hello', callbackTestStringTransaction); + testProxy.testMapTransaction(testMap, callbackTestMapTransaction); + testProxy.testArrayTransaction(['1','2'], callbackTestMapTransaction); }, onDisconnect:function (elementName) { console.log('onDisconnectService onDisconnect'); diff --git a/en/application-dev/Readme-EN.md b/en/application-dev/Readme-EN.md index 73bbd2d608562535e3272c1a659bcebbd39b125a..f71b814661e652486bf1a61ee3e7c7dd23dbcf4a 100644 --- a/en/application-dev/Readme-EN.md +++ b/en/application-dev/Readme-EN.md @@ -24,6 +24,12 @@ - [Multi-HAP Usage Rules](quick-start/multi-hap-rules.md) - [Multi-HAP Operation Mechanism and Data Communication Modes](quick-start/multi-hap-principles.md) - [Application 
Installation and Uninstallation Process](quick-start/application-package-install-uninstall.md) + - Shared Package + - [Shared Package Overview](quick-start/shared-guide.md) + - [HAR](quick-start/har-package.md) + - HSP + - [In-Application HSP Development](quick-start/in-app-hsp.md) + - [Inter-Application HSP Development (for System Applications Only)](quick-start/cross-app-hsp.md) - Application Configuration Files in Stage Model - [Application Configuration File Overview (Stage Model)](quick-start/application-configuration-file-overview-stage.md) - [app.json5 Configuration File](quick-start/app-configuration-file.md) @@ -36,18 +42,44 @@ - [Resource Categories and Access](quick-start/resource-categories-and-access.md) - Learning ArkTS - [Getting Started with ArkTS](quick-start/arkts-get-started.md) - - ArkTS Syntax (Declarative UI) - - [Basic UI Description](quick-start/arkts-basic-ui-description.md) - - State Management - - [Basic Concepts](quick-start/arkts-state-mgmt-concepts.md) - - [State Management with Page-level Variables](quick-start/arkts-state-mgmt-page-level.md) - - [State Management with Application-level Variables](quick-start/arkts-state-mgmt-application-level.md) - - [Dynamic UI Element Building](quick-start/arkts-dynamic-ui-elememt-building.md) - - [Rendering Control](quick-start/arkts-rendering-control.md) - - [Restrictions and Extensions](quick-start/arkts-restrictions-and-extensions.md) + - Basic Syntax + - [Basic Syntax Overview](quick-start/arkts-basic-syntax-overview.md) + - [Declarative UI Description](quick-start/arkts-declarative-ui-description.md) + - Custom Component + - [Creating a Custom Component](quick-start/arkts-create-custom-components.md) + - [Page and Custom Component Lifecycle](quick-start/arkts-page-custom-components-lifecycle.md) + - [\@Builder: Custom Builder Function](quick-start/arkts-builder.md) + - [\@BuilderParam: @Builder Function Reference](quick-start/arkts-builderparam.md) + - [\@Styles: Definition of Resusable 
Styles](quick-start/arkts-style.md) + - [\@Extend: Extension of Built-in Components](quick-start/arkts-extend.md) + - [stateStyles: Polymorphic Style](quick-start/arkts-statestyles.md) + - State Management + - [State Management Overview](quick-start/arkts-state-management-overview.md) + - Component State Management + - [\@State: State Owned by Component](quick-start/arkts-state.md) + - [\@Prop: One-Way Synchronization from Parent to Child Components](quick-start/arkts-prop.md) + - [\@Link: Two-Way Synchronization Between Parent and Child Components](quick-start/arkts-link.md) + - [\@Provide and \@Consume: Two-Way Synchronization with Descendant Components](quick-start/arkts-provide-and-consume.md) + - [\@Observed and \@ObjectLink: Observing Attribute Changes in Nested Class Objects](quick-start/arkts-observed-and-objectlink.md) + - Application State Management + - [Application State Management Overview](quick-start/arkts-application-state-management-overview.md) + - [LocalStorage: UI State Storage](quick-start/arkts-localstorage.md) + - [AppStorage: Application-wide UI State Storage](quick-start/arkts-appstorage.md) + - [PersistentStorage: Application State Persistence](quick-start/arkts-persiststorage.md) + - [Environment: Device Environment Query](quick-start/arkts-environment.md) + - Other State Management Features + - [Overview of Other State Management Features](quick-start/arkts-other-state-mgmt-functions-overview.md) + - [\@Watch: Getting Notified of State Variable Changes](quick-start/arkts-watch.md) + - [$$ Syntax: Two-Way Synchronization of Built-in Components](quick-start/arkts-two-way-sync.md) + - Rendering Control + - [Rendering Control Overview](quick-start/arkts-rendering-control-overview.md) + - [if/else: Conditional Rendering](quick-start/arkts-rendering-control-ifelse.md) + - [ForEach: Rendering of Repeated Content](quick-start/arkts-rendering-control-foreach.md) + - [LazyForEach: Lazy Data 
Loading](quick-start/arkts-rendering-control-lazyforeach.md) - Development - [Application Models](application-models/Readme-EN.md) - [UI Development](ui/Readme-EN.md) + - [Web](web/Readme-EN.md) - [Notification](notification/Readme-EN.md) - [Window Manager](windowmanager/Readme-EN.md) - [WebGL](webgl/Readme-EN.md) @@ -81,8 +113,10 @@ - [ArkTS and JS APIs](reference/apis/Readme-EN.md) - [Error Codes](reference/errorcodes/Readme-EN.md) - Native APIs + - [Native APIs](reference/native-apis/Readme-EN.md) - [Standard Libraries](reference/native-lib/third_party_libc/musl.md) - [Node_API](reference/native-lib/third_party_napi/napi.md) - [FAQs](faqs/Readme-EN.md) - Contribution - [How to Contribute](../contribute/documentation-contribution.md) + \ No newline at end of file diff --git a/en/application-dev/ability-deprecated/ability-delegator.md b/en/application-dev/ability-deprecated/ability-delegator.md index f72a192dc510c28104511fb1530a915c9f9827cc..b32d472176a5b6270fece94ae4bd8ae9a7bd73fa 100644 --- a/en/application-dev/ability-deprecated/ability-delegator.md +++ b/en/application-dev/ability-deprecated/ability-delegator.md @@ -63,7 +63,7 @@ For details about how to use DevEco Studio to start the test framework, see [Ope **Example** ```javascript -import AbilityDelegatorRegistry from '@ohos.app.ability.abilityDelegatorRegistry'; +import AbilityDelegatorRegistry from '@ohos.application.abilityDelegatorRegistry' function onAbilityCreateCallback(data) { console.info("onAbilityCreateCallback"); @@ -87,11 +87,11 @@ abilityDelegator.addAbilityMonitor(monitor).then(() => { **Modules to Import** ```javascript -import AbilityDelegatorRegistry from '@ohos.app.ability.abilityDelegatorRegistry'; +import AbilityDelegatorRegistry from '@ohos.application.abilityDelegatorRegistry' ``` ```javascript -var abilityDelegator = AbilityDelegatorRegistry.getAbilityDelegator(); +var abilityDelegator = AbilityDelegatorRegistry.getAbilityDelegator() ``` ### Starting an Ability and Listening for the 
Ability State diff --git a/en/application-dev/ability-deprecated/fa-dataability.md b/en/application-dev/ability-deprecated/fa-dataability.md index 8d94e8f225a3966d676e6c7631968c25f5634531..217f617db77ff329eb1d0fa0eef7dcb6172cf45a 100644 --- a/en/application-dev/ability-deprecated/fa-dataability.md +++ b/en/application-dev/ability-deprecated/fa-dataability.md @@ -154,7 +154,7 @@ The basic dependency packages include: import featureAbility from '@ohos.ability.featureAbility' import ohos_data_ability from '@ohos.data.dataAbility' import ohos_data_rdb from '@ohos.data.rdb' - + var urivar = "dataability:///com.ix.DataAbility" var DAHelper = featureAbility.acquireDataAbilityHelper( urivar diff --git a/en/application-dev/ability-deprecated/fa-formability.md b/en/application-dev/ability-deprecated/fa-formability.md index 5c08a1b0b3955472d6f3b16cf7a343a083a0116a..96ed58d8ef2206d6c66e413d0a6fc34423651974 100644 --- a/en/application-dev/ability-deprecated/fa-formability.md +++ b/en/application-dev/ability-deprecated/fa-formability.md @@ -25,7 +25,7 @@ Carry out the following operations to develop the widget provider based on the [ 1. Implement lifecycle callbacks by using the **LifecycleForm** APIs. 2. Create a **FormBindingData** instance. 3. Update a widget by using the **FormProvider** APIs. -4. Develop the widget UI pages. +4. Develop the widget UI page. ## Available APIs @@ -231,7 +231,7 @@ You should override **onDestroy** to implement widget data deletion. } ``` -For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md). +For details about how to implement persistent data storage, see [Data Persistence by User Preferences](../database/data-persistence-by-preferences.md). The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary. 
@@ -402,3 +402,5 @@ The code snippet is as follows: } } ``` + + \ No newline at end of file diff --git a/en/application-dev/ability-deprecated/fa-pageability.md b/en/application-dev/ability-deprecated/fa-pageability.md index 28b5ce36e292acc9e350f8ae96cb7bcf17f8c8c3..e28c0f2823ff61f6c60f469eaaf9d197184e8f50 100644 --- a/en/application-dev/ability-deprecated/fa-pageability.md +++ b/en/application-dev/ability-deprecated/fa-pageability.md @@ -47,7 +47,7 @@ You can specify the launch type by setting **launchType** in the **config.json** | Launch Type | Description |Description | | ----------- | ------- |---------------- | -| standard | Multi-instance | A new instance is started each time an ability starts.| +| standard | Multi-instance | A new instance is started each time an ability starts.| | singleton | Singleton | The ability has only one instance in the system. If an instance already exists when an ability is started, that instance is reused.| By default, **singleton** is used. diff --git a/en/application-dev/ability-deprecated/stage-ability-continuation.md b/en/application-dev/ability-deprecated/stage-ability-continuation.md index b53d57d849c8c771b92d4e86a2095163aab0a395..f99966aff24d9b465627ba475cda018671820809 100644 --- a/en/application-dev/ability-deprecated/stage-ability-continuation.md +++ b/en/application-dev/ability-deprecated/stage-ability-continuation.md @@ -6,7 +6,7 @@ Ability continuation is to continue the current mission of an application, inclu ## Available APIs -The following table lists the APIs used for ability continuation. For details about the APIs, see [Ability](../reference/apis/js-apis-application-ability.md). +The following table lists the APIs used for ability continuation. For details about the APIs, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md). 
**Table 1** Ability continuation APIs @@ -48,96 +48,88 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar } ``` - - - - Configure the application startup type. - - If **launchType** is set to **standard** in the **module.json5** file, the application is of the multi-instance launch type. During ability continuation, regardless of whether the application is already open, the target starts the application and restores the UI page. If **launchType** is set to **singleton**, the application is of the singleton launch type. If the application is already open, the target clears the existing page stack and restores the UI page. For more information, see "Launch Type" in [Ability Development](./stage-ability.md). + + If **launchType** is set to **multiton** in the **module.json5** file, the application is of the multi-instance launch type. During ability continuation, regardless of whether the application is already open, the target starts the application and restores the UI page. If **launchType** is set to **singleton**, the application is of the singleton launch type. If the application is already open, the target clears the existing page stack and restores the UI page. For more information, see "Launch Type" in [Ability Development](./stage-ability.md). + + Configure a multi-instance application as follows: + + ```javascript + { + "module": { + "abilities": [ + { + "launchType": "multiton" + } + ] + } + } + ``` + + Configure a singleton application as follows or retain the default settings of **launchType**: + + ```javascript + { + "module": { + "abilities": [ + { + "launchType": "singleton" + } + ] + } + } + ``` + + - Apply for the distributed permissions. - Configure a multi-instance application as follows: + Declare the **DISTRIBUTED_DATASYNC** permission in the **module.json5** file for the application. 
- ```javascript - { - "module": { - "abilities": [ - { - "launchType": "standard" - } - ] - } - } - ``` + ```javascript + "requestPermissions": [ + { + "name": "ohos.permission.DISTRIBUTED_DATASYNC" + }, + ``` - Configure a singleton application as follows or retain the default settings of **launchType**: + This permission must be granted by the user in a dialog box when the application is started for the first time. To enable the application to display a dialog box to ask for the permission, add the following code to **onWindowStageCreate** of the **Ability** class: - ```javascript - { - "module": { - "abilities": [ - { - "launchType": "singleton" + ```javascript + requestPermissions = async () => { + let permissions: Array = [ + "ohos.permission.DISTRIBUTED_DATASYNC" + ]; + let needGrantPermission = false + let accessManger = accessControl.createAtManager() + Logger.info("app permission get bundle info") + let bundleInfo = await bundle.getApplicationInfo(BUNDLE_NAME, 0, 100) + Logger.info(`app permission query permission ${bundleInfo.accessTokenId.toString()}`) + for (const permission of permissions) { + Logger.info(`app permission query grant status ${permission}`) + try { + let grantStatus = await accessManger.verifyAccessToken(bundleInfo.accessTokenId, permission) + if (grantStatus === PERMISSION_REJECT) { + needGrantPermission = true + break; + } + } catch (err) { + Logger.error(`app permission query grant status error ${permission} ${JSON.stringify(err)}`) + needGrantPermission = true + break; + } + } + if (needGrantPermission) { + Logger.info("app permission needGrantPermission") + try { + await accessManger.requestPermissionsFromUser(this.context, permissions) + } catch (err) { + Logger.error(`app permission ${JSON.stringify(err)}`) + } + } else { + Logger.info("app permission already granted") + } } - ] - } - } - ``` - - - - - Apply for the distributed permissions. 
- - Declare the **DISTRIBUTED_DATASYNC** permission in the **module.json5** file for the application. - - ```javascript - "requestPermissions": [ - { - "name": "ohos.permission.DISTRIBUTED_DATASYNC" - }, - ``` - - - - This permission must be granted by the user in a dialog box when the application is started for the first time. To enable the application to display a dialog box to ask for the permission, add the following code to **onWindowStageCreate** of the **Ability** class: - - ```javascript - requestPermissions = async () => { - let permissions: Array = [ - "ohos.permission.DISTRIBUTED_DATASYNC" - ]; - let needGrantPermission = false - let accessManger = accessControl.createAtManager() - Logger.info("app permission get bundle info") - let bundleInfo = await bundle.getApplicationInfo(BUNDLE_NAME, 0, 100) - Logger.info(`app permission query permission ${bundleInfo.accessTokenId.toString()}`) - for (const permission of permissions) { - Logger.info(`app permission query grant status ${permission}`) - try { - let grantStatus = await accessManger.verifyAccessToken(bundleInfo.accessTokenId, permission) - if (grantStatus === PERMISSION_REJECT) { - needGrantPermission = true - break; - } - } catch (err) { - Logger.error(`app permission query grant status error ${permission} ${JSON.stringify(err)}`) - needGrantPermission = true - break; - } - } - if (needGrantPermission) { - Logger.info("app permission needGrantPermission") - try { - await accessManger.requestPermissionsFromUser(this.context, permissions) - } catch (err) { - Logger.error(`app permission ${JSON.stringify(err)}`) - } - } else { - Logger.info("app permission already granted") - } - } - ``` - - + ``` + 2. Implement the **onContinue()** API. 
@@ -155,7 +147,7 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar You can obtain the target device ID (identified by the key **targetDevice**) and the version number (identified by the key **version**) of the application installed on the target device from the **wantParam** parameter of this API. The version number can be used for compatibility check. If the current application version is incompatible with that on the target device, **OnContinueResult.MISMATCH** can be returned to reject the continuation request. - Example + Example: ```javascript onContinue(wantParam : {[key: string]: any}) { @@ -168,8 +160,6 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar } ``` - - 3. Implement the continuation logic in the **onCreate()** or **onNewWant()** API. The **onCreate()** API is called by the target. When the ability is started on the target device, this API is called to instruct the application to synchronize the memory data and UI component state, and triggers page restoration after the synchronization is complete. If the continuation logic is not implemented, the ability will be started in common startup mode and the page cannot be restored. @@ -178,11 +168,9 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar After data restore is complete, call **restoreWindowStage** to trigger page restoration. - - You can also use **want.parameters.version** in the **want** parameter to obtain the application version number of the initiator. 
- - Example + + Example: ```javascript import UIAbility from '@ohos.app.ability.UIAbility'; @@ -190,7 +178,7 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar export default class EntryAbility extends UIAbility { storage : LocalStorag; - + onCreate(want, launchParam) { Logger.info(`EntryAbility onCreate ${AbilityConstant.LaunchReason.CONTINUATION}`) if (launchParam.launchReason == AbilityConstant.LaunchReason.CONTINUATION) { @@ -211,7 +199,7 @@ For a singleton ability, use **onNewWant()** to achieve the same implementation. Use distributed objects. -Distributed objects allow cross-device data synchronization like local variables. For two devices that form a Super Device, when data in the distributed data object of an application is added, deleted, or modified on a device, the data for the same application is also updated on the other device. Both devices can listen for the data changes and online and offline states of the other. For details, see [Distributed Data Object Development](../database/database-distributedobject-guidelines.md). +Distributed objects allow cross-device data synchronization like local variables. For two devices that form a Super Device, when data in the distributed data object of an application is added, deleted, or modified on a device, the data for the same application is also updated on the other device. Both devices can listen for the data changes and online and offline states of the other. For details, see [Sharing Distributed Data Objects](../database/data-sync-of-distributed-data-object.md). In the ability continuation scenario, the distributed data object is used to synchronize the memory data from the local device to the target device. @@ -249,8 +237,6 @@ In the ability continuation scenario, the distributed data object is used to syn }); ``` - - - The target device obtains the session ID from **onCreate()**, creates a distributed object, and associates the distributed object with the session ID. 
In this way, the distributed object can be synchronized. Before calling **restoreWindowStage**, ensure that all distributed objects required for continuation have been associated. ```javascript @@ -283,8 +269,6 @@ In the ability continuation scenario, the distributed data object is used to syn } ``` - - ### More Information 1. Timeout @@ -294,15 +278,13 @@ In the ability continuation scenario, the distributed data object is used to syn 2. By default, the system supports page stack information migration, which means that the page stack of the initiator will be automatically migrated to the target device. No adaptation is required. - - ### Restrictions 1. The continuation must be performed between the same ability, which means the same bundle name, module name, and ability name. For details, see [Application Package Structure Configuration File](../quick-start/module-configuration-file.md). 2. Currently, the application can only implement the continuation capability. The continuation action must be initiated by the system. - - ### Best Practice For better user experience, you are advised to use the **wantParam** parameter to transmit data smaller than 100 KB and use distributed objects to transmit data larger than 100 KB. + + \ No newline at end of file diff --git a/en/application-dev/ability-deprecated/stage-ability.md b/en/application-dev/ability-deprecated/stage-ability.md index 60f954c78f306193e7bfefe1e6ceee2babf86da4..2cd18f7aa3052cee86785d55bc81d68cfdece802 100644 --- a/en/application-dev/ability-deprecated/stage-ability.md +++ b/en/application-dev/ability-deprecated/stage-ability.md @@ -12,8 +12,8 @@ An ability can be launched in the **standard**, **singleton**, or **specified** | Launch Type | Description |Action | | ----------- | ------- |---------------- | -| standard | Standard mode | A new instance is started each time an ability starts.| -| singleton | Singleton mode | The ability has only one instance in the system. 
If an instance already exists when an ability is started, that instance is reused.| +| multiton | Multi-instance mode| A new instance is started each time an ability starts.| +| singleton | Singleton mode | Default type. The ability has only one instance in the system. If an instance already exists when an ability is started, that instance is reused.| | specified | Instance-specific| The internal service of an ability determines whether to create multiple instances during running.| By default, the singleton mode is used. The following is an example of the **module.json5** file: @@ -39,7 +39,7 @@ The table below describes the APIs provided by the **AbilityStage** class, which |onAcceptWant(want: Want): string|Called when a specified ability is started.| |onConfigurationUpdated(config: Configuration): void|Called when the global configuration is updated.| -The table below describes the APIs provided by the **Ability** class. For details about the APIs, see [Ability](../reference/apis/js-apis-application-ability.md). +The table below describes the APIs provided by the **Ability** class. For details about the APIs, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md). **Table 2** Ability APIs @@ -190,7 +190,7 @@ export default class EntryAbility extends UIAbility { ``` ## Starting an Ability ### Available APIs -The **Ability** class has the **context** attribute, which belongs to the **AbilityContext** class. The **AbilityContext** class has the **abilityInfo**, **currentHapModuleInfo**, and other attributes as well as the APIs used for starting abilities. For details, see [AbilityContext](../reference/apis/js-apis-ability-context.md). +The **Ability** class has the **context** attribute, which belongs to the **AbilityContext** class. The **AbilityContext** class has the **abilityInfo**, **currentHapModuleInfo**, and other attributes as well as the APIs used for starting abilities. 
For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md). **Table 3** AbilityContext APIs |API|Description| @@ -207,7 +207,7 @@ The **Ability** class has the **context** attribute, which belongs to the **Abil An application can obtain the context of an **Ability** instance through **this.context** and then use the **startAbility** API in the **AbilityContext** class to start the ability. The ability can be started by specifying **Want**, **StartOptions**, and **accountId**, and the operation result can be returned using a callback or **Promise** instance. The sample code is as follows: ```ts let context = this.context -var want = { +let want = { "deviceId": "", "bundleName": "com.example.MyApplication", "abilityName": "EntryAbility" @@ -224,7 +224,7 @@ context.startAbility(want).then(() => { In the cross-device scenario, you must specify the ID of the remote device. The sample code is as follows: ```ts let context = this.context -var want = { +let want = { "deviceId": getRemoteDeviceId(), "bundleName": "com.example.MyApplication", "abilityName": "EntryAbility" @@ -239,9 +239,9 @@ Obtain the ID of a specified device from **DeviceManager**. 
The sample code is a ```ts import deviceManager from '@ohos.distributedHardware.deviceManager'; function getRemoteDeviceId() { - if (typeof dmClass === 'object' && dmClass != null) { - var list = dmClass.getTrustedDeviceListSync(); - if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') { + if (typeof dmClass === 'object' && dmClass !== null) { + let list = dmClass.getTrustedDeviceListSync(); + if (typeof (list) === 'undefined' || typeof (list.length) === 'undefined') { console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null"); return; } diff --git a/en/application-dev/ability-deprecated/stage-call.md b/en/application-dev/ability-deprecated/stage-call.md index 71f5f6934dda385161f4adcb95837924c691c278..d9269295e06633fa0f55bdebad51eb1c354f2934 100644 --- a/en/application-dev/ability-deprecated/stage-call.md +++ b/en/application-dev/ability-deprecated/stage-call.md @@ -31,12 +31,12 @@ The ability call process is as follows: > Currently, only system applications can use the ability call. ## Available APIs -The table below describes the ability call APIs. For details, see [Ability](../reference/apis/js-apis-application-ability.md#caller). +The table below describes the ability call APIs. For details, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md#caller). **Table 2** Ability call APIs |API|Description| |:------|:------| -|startAbilityByCall(want: Want): Promise\|Starts an ability in the foreground (through the **want** configuration) or background (default) and obtains the **Caller** object for communication with the ability. For details, see [AbilityContext](../reference/apis/js-apis-ability-context.md#abilitycontextstartabilitybycall) or **ServiceExtensionContext**.| +|startAbilityByCall(want: Want): Promise\|Starts an ability in the foreground (through the **want** configuration) or background (default) and obtains the **Caller** object for communication with the ability. 
For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartabilitybycall) or **ServiceExtensionContext**.| |on(method: string, callback: CalleeCallBack): void|Callback invoked when the callee ability registers a method.| |off(method: string): void|Callback invoked when the callee ability deregisters a method.| |call(method: string, data: rpc.Sequenceable): Promise\|Sends agreed sequenceable data to the callee ability.| @@ -47,242 +47,263 @@ The table below describes the ability call APIs. For details, see [Ability](../r ## How to Develop The procedure for developing the ability call is as follows: 1. Create a callee ability. - 2. Access the callee ability. ### Creating a Callee Ability For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener. -**1. Configure the ability launch type.** - - Set **launchType** of the callee ability to **singleton** in the **module.json5** file. -|JSON Field|Description| -|:------|:------| -|"launchType"|Ability launch type. Set this parameter to **singleton**.| - -An example of the ability configuration is as follows: -```json -"abilities":[{ - "name": ".CalleeAbility", - "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts", - "launchType": "singleton", - "description": "$string:CalleeAbility_desc", - "icon": "$media:icon", - "label": "$string:CalleeAbility_label", - "visible": true -}] -``` -**2. Import the Ability module.** -```ts -import Ability from '@ohos.app.ability.UIAbility' -``` -**3. Define the agreed sequenceable data.** - - The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. 
The code snippet is as follows: -```ts -export default class MySequenceable { - num: number = 0 - str: string = "" - - constructor(num, string) { - this.num = num - this.str = string - } - - marshalling(messageParcel) { - messageParcel.writeInt(this.num) - messageParcel.writeString(this.str) - return true - } - - unmarshalling(messageParcel) { - this.num = messageParcel.readInt() - this.str = messageParcel.readString() - return true - } -} -``` -**4. Implement Callee.on and Callee.off.** - - The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The code snippet is as follows: -```ts -const TAG: string = '[CalleeAbility]' -const MSG_SEND_METHOD: string = 'CallSendMsg' - -function sendMsgCallback(data) { - console.log('CalleeSortFunc called') - - // Obtain the sequenceable data sent by the caller ability. - let receivedData = new MySequenceable(0, '') - data.readSequenceable(receivedData) - console.log(`receiveData[${receivedData.num}, ${receivedData.str}]`) - - // Process the data. - // Return the sequenceable data result to the caller ability. 
- return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`) -} - -export default class CalleeAbility extends Ability { - onCreate(want, launchParam) { - try { - this.callee.on(MSG_SEND_METHOD, sendMsgCallback) - } catch (error) { - console.log(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`) - } - } - - onDestroy() { - try { - this.callee.off(MSG_SEND_METHOD) - } catch (error) { - console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`) - } - } -} -``` +1. **Configure the ability launch type.** + + Set **launchType** of the callee ability to **singleton** in the **module.json5** file. + + |JSON Field|Description| + |:------|:------| + |"launchType"|Ability launch type. Set this parameter to **singleton**.| + + An example of the ability configuration is as follows: + + ```json + "abilities":[{ + "name": ".CalleeAbility", + "srcEntry": "./ets/CalleeAbility/CalleeAbility.ts", + "launchType": "singleton", + "description": "$string:CalleeAbility_desc", + "icon": "$media:icon", + "label": "$string:CalleeAbility_label", + "exported": true + }] + ``` + +2. **Import the UIAbility module.** + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + ``` + +3. **Define the agreed sequenceable data.** + + The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. The code snippet is as follows: + + ```ts + export default class MySequenceable { + num: number = 0 + str: string = "" + + constructor(num, string) { + this.num = num + this.str = string + } + + marshalling(messageParcel) { + messageParcel.writeInt(this.num) + messageParcel.writeString(this.str) + return true + } + + unmarshalling(messageParcel) { + this.num = messageParcel.readInt() + this.str = messageParcel.readString() + return true + } + } + ``` + +4. 
**Implement Callee.on and Callee.off.** + + The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The code snippet is as follows: + + ```ts + const TAG: string = '[CalleeAbility]' + const MSG_SEND_METHOD: string = 'CallSendMsg' + + function sendMsgCallback(data) { + console.log('CalleeSortFunc called') + + // Obtain the sequenceable data sent by the caller ability. + let receivedData = new MySequenceable(0, '') + data.readSequenceable(receivedData) + console.log(`receiveData[${receivedData.num}, ${receivedData.str}]`) + + // Process the data. + // Return the sequenceable data result to the caller ability. + return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`) + } + + export default class CalleeAbility extends Ability { + onCreate(want, launchParam) { + try { + this.callee.on(MSG_SEND_METHOD, sendMsgCallback) + } catch (error) { + console.log(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`) + } + } + + onDestroy() { + try { + this.callee.off(MSG_SEND_METHOD) + } catch (error) { + console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`) + } + } + } + ``` ### Accessing the Callee Ability -**1. Import the Ability module.** -```ts -import Ability from '@ohos.app.ability.UIAbility' -``` -**2. Obtain the Caller object.** - - The **context** attribute of the ability implements **startAbilityByCall** to obtain the **Caller** object for communication. 
The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the **Caller** object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements. The code snippet is as follows: -```ts -// Register the onRelease listener of the caller ability. -private regOnRelease(caller) { - try { - caller.on("release", (msg) => { - console.log(`caller onRelease is called ${msg}`) - }) - console.log('caller register OnRelease succeed') - } catch (error) { - console.log(`caller register OnRelease failed with ${error}`) - } -} - -async onButtonGetCaller() { - try { - this.caller = await context.startAbilityByCall({ - bundleName: 'com.samples.CallApplication', - abilityName: 'CalleeAbility' - }) - if (this.caller === undefined) { - console.log('get caller failed') - return - } - console.log('get caller success') - this.regOnRelease(this.caller) - } catch (error) { - console.log(`get caller failed with ${error}`) - } -} -``` - In the cross-device scenario, you need to specify the ID of the peer device. The code snippet is as follows: -```ts -async onButtonGetRemoteCaller() { - var caller = undefined - var context = this.context - - context.startAbilityByCall({ - deviceId: getRemoteDeviceId(), - bundleName: 'com.samples.CallApplication', - abilityName: 'CalleeAbility' - }).then((data) => { - if (data != null) { - caller = data - console.log('get remote caller success') - // Register the onRelease listener of the caller ability. - caller.on("release", (msg) => { - console.log(`remote caller onRelease is called ${msg}`) - }) - console.log('remote caller register OnRelease succeed') - } - }).catch((error) => { - console.error(`get remote caller failed with ${error}`) - }) -} -``` - Obtain the ID of the peer device from **DeviceManager**. Note that the **getTrustedDeviceListSync** API is open only to system applications. 
The code snippet is as follows: -```ts -import deviceManager from '@ohos.distributedHardware.deviceManager'; -var dmClass; -function getRemoteDeviceId() { - if (typeof dmClass === 'object' && dmClass != null) { - var list = dmClass.getTrustedDeviceListSync() - if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') { - console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null") - return - } - console.log("EntryAbility onButtonClick getRemoteDeviceId success:" + list[0].deviceId) - return list[0].deviceId - } else { - console.log("EntryAbility onButtonClick getRemoteDeviceId err: dmClass is null") - } -} -``` - In the cross-device scenario, your application must also apply for the data synchronization permission from end users. The code snippet is as follows: -```ts -import abilityAccessCtrl from '@ohos.abilityAccessCtrl.d.ts'; - -requestPermission() { - let context = this.context - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'] - let atManager = abilityAccessCtrl.createAtManager(); - atManager.requestPermissionsFromUser(context, permissions).then((data) => { - console.log("Succeed to request permission from user with data: "+ JSON.stringify(data)) - }).catch((error) => { - console.log("Failed to request permission from user with error: "+ JSON.stringify(error)) - }) -} -``` -**3. Send agreed sequenceable data.** - - The sequenceable data can be sent to the callee ability with or without a return value. The method and sequenceable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability. 
The code snippet is as follows: -```ts -const MSG_SEND_METHOD: string = 'CallSendMsg' -async onButtonCall() { - try { - let msg = new MySequenceable(1, 'origin_Msg') - await this.caller.call(MSG_SEND_METHOD, msg) - } catch (error) { - console.log(`caller call failed with ${error}`) - } -} -``` - - In the following, **CallWithResult** is used to send data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**. The code snippet is as follows: -```ts -const MSG_SEND_METHOD: string = 'CallSendMsg' -originMsg: string = '' -backMsg: string = '' -async onButtonCallWithResult(originMsg, backMsg) { - try { - let msg = new MySequenceable(1, originMsg) - const data = await this.caller.callWithResult(MSG_SEND_METHOD, msg) - console.log('caller callWithResult succeed') - - let result = new MySequenceable(0, '') - data.readSequenceable(result) - backMsg(result.str) - console.log(`caller result is [${result.num}, ${result.str}]`) - } catch (error) { - console.log(`caller callWithResult failed with ${error}`) - } -} -``` -**4. Release the Caller object.** - - When the **Caller** object is no longer required, use **release()** to release it. The code snippet is as follows: -```ts -releaseCall() { - try { - this.caller.release() - this.caller = undefined - console.log('caller release succeed') - } catch (error) { - console.log(`caller release failed with ${error}`) - } -} -``` +1. **Import the Ability module.** + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + ``` + +2. **Obtain the Caller object.** + + The **context** attribute of the ability implements **startAbilityByCall** to obtain the **Caller** object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the **Caller** object, and register the **onRelease** listener of the caller ability. 
You need to implement processing based on service requirements. The code snippet is as follows: + + ```ts + // Register the onRelease listener of the caller ability. + private regOnRelease(caller) { + try { + caller.on("release", (msg) => { + console.log(`caller onRelease is called ${msg}`) + }) + console.log('caller register OnRelease succeed') + } catch (error) { + console.log(`caller register OnRelease failed with ${error}`) + } + } + + async onButtonGetCaller() { + try { + this.caller = await context.startAbilityByCall({ + bundleName: 'com.samples.CallApplication', + abilityName: 'CalleeAbility' + }) + if (this.caller === undefined) { + console.log('get caller failed') + return + } + console.log('get caller success') + this.regOnRelease(this.caller) + } catch (error) { + console.log(`get caller failed with ${error}`) + } + } + ``` + + In the cross-device scenario, you need to specify the ID of the peer device. The code snippet is as follows: + + ```ts + async onButtonGetRemoteCaller() { + var caller = undefined + var context = this.context + + context.startAbilityByCall({ + deviceId: getRemoteDeviceId(), + bundleName: 'com.samples.CallApplication', + abilityName: 'CalleeAbility' + }).then((data) => { + if (data != null) { + caller = data + console.log('get remote caller success') + // Register the onRelease listener of the caller ability. + caller.on("release", (msg) => { + console.log(`remote caller onRelease is called ${msg}`) + }) + console.log('remote caller register OnRelease succeed') + } + }).catch((error) => { + console.error(`get remote caller failed with ${error}`) + }) + } + ``` + + Obtain the ID of the peer device from **DeviceManager**. Note that the **getTrustedDeviceListSync** API is open only to system applications. 
The code snippet is as follows: + + ```ts + import deviceManager from '@ohos.distributedHardware.deviceManager'; + var dmClass; + function getRemoteDeviceId() { + if (typeof dmClass === 'object' && dmClass != null) { + var list = dmClass.getTrustedDeviceListSync() + if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') { + console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null") + return + } + console.log("EntryAbility onButtonClick getRemoteDeviceId success:" + list[0].deviceId) + return list[0].deviceId + } else { + console.log("EntryAbility onButtonClick getRemoteDeviceId err: dmClass is null") + } + } + ``` + + In the cross-device scenario, your application must also apply for the data synchronization permission from end users. The code snippet is as follows: + + ```ts + import abilityAccessCtrl from '@ohos.abilityAccessCtrl.d.ts'; + + requestPermission() { + let context = this.context + let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'] + let atManager = abilityAccessCtrl.createAtManager(); + atManager.requestPermissionsFromUser(context, permissions).then((data) => { + console.log("Succeed to request permission from user with data: "+ JSON.stringify(data)) + }).catch((error) => { + console.log("Failed to request permission from user with error: "+ JSON.stringify(error)) + }) + } + ``` + +3. **Send agreed sequenceable data.** + + The sequenceable data can be sent to the callee ability with or without a return value. The method and sequenceable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability. 
The code snippet is as follows: + + ```ts + const MSG_SEND_METHOD: string = 'CallSendMsg' + async onButtonCall() { + try { + let msg = new MySequenceable(1, 'origin_Msg') + await this.caller.call(MSG_SEND_METHOD, msg) + } catch (error) { + console.log(`caller call failed with ${error}`) + } + } + ``` + + In the following, **CallWithResult** is used to send data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**. The code snippet is as follows: + + ```ts + const MSG_SEND_METHOD: string = 'CallSendMsg' + originMsg: string = '' + backMsg: string = '' + async onButtonCallWithResult(originMsg, backMsg) { + try { + let msg = new MySequenceable(1, originMsg) + const data = await this.caller.callWithResult(MSG_SEND_METHOD, msg) + console.log('caller callWithResult succeed') + + let result = new MySequenceable(0, '') + data.readSequenceable(result) + backMsg(result.str) + console.log(`caller result is [${result.num}, ${result.str}]`) + } catch (error) { + console.log(`caller callWithResult failed with ${error}`) + } + } + ``` + +4. **Release the Caller object.** + + When the **Caller** object is no longer required, use **release()** to release it. 
The code snippet is as follows: + + ```ts + releaseCall() { + try { + this.caller.release() + this.caller = undefined + console.log('caller release succeed') + } catch (error) { + console.log(`caller release failed with ${error}`) + } + } + ``` \ No newline at end of file diff --git a/en/application-dev/ability-deprecated/stage-formextension.md b/en/application-dev/ability-deprecated/stage-formextension.md index bc1c54afe9d2e323f0938bca250f83737df9cbdb..8a0425f4fab41b97cd15ecb9986f77b4a108ae7a 100644 --- a/en/application-dev/ability-deprecated/stage-formextension.md +++ b/en/application-dev/ability-deprecated/stage-formextension.md @@ -135,7 +135,7 @@ To create a widget in the stage model, you need to implement lifecycle callbacks | Name | Description | Data Type | Default Value Allowed | | ----------- | ------------------------------------------------------------ | ---------- | -------------------- | | name | Name of the Extension ability. This field must be specified. | String | No | - | srcEntrance | Path of the Extension ability lifecycle code. This field must be specified.| String | No | + | srcEntry | Path of the Extension ability lifecycle code. This field must be specified.| String | No | | description | Description of the Extension ability. The value can be a string or a resource index to descriptions in multiple languages.| String | Yes (initial value: left empty)| | icon | Index of the Extension ability icon file. | String | Yes (initial value: left empty)| | label | Descriptive information about the Extension ability presented externally. 
The value can be a string or a resource index to the description.| String | Yes (initial value: left empty)| @@ -150,7 +150,7 @@ To create a widget in the stage model, you need to implement lifecycle callbacks ```json "extensionAbilities": [{ "name": "FormAbility", - "srcEntrance": "./ets/FormAbility/FormAbility.ts", + "srcEntry": "./ets/FormAbility/FormAbility.ts", "label": "$string:form_FormAbility_label", "description": "$string:form_FormAbility_desc", "type": "form", @@ -242,7 +242,7 @@ You should override **onDestroy** to implement widget data deletion. } ``` -For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md). +For details about how to implement persistent data storage, see [Application Data Persistence Overview](../database/app-data-persistence-overview.md). The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary. @@ -366,7 +366,7 @@ You can set router and message events for components on a widget. The router eve 1. Set the **onclick** field in the HML file to **routerEvent** or **messageEvent**, depending on the **actions** settings in the JSON file. 2. Set the router event. - **action**: **"router"**, which indicates a router event. - - **abilityName**: target ability name, for example, **EntryAbility**, which is the default UIAbility name in DevEco Studio for the stage model. + - **abilityName**: target ability name, for example, **EntryAbility**, which is the default main ability name in DevEco Studio for the stage model. - **params**: custom parameters of the target ability. Set them as required. The value can be obtained from **parameters** in **want** used for starting the target ability. For example, in the lifecycle function **onCreate** of the EntryAbility in the stage model, you can obtain **want** and its **parameters** field. 3. Set the message event. 
- **action**: **"message"**, which indicates a message event. @@ -413,3 +413,5 @@ The code snippet is as follows: } } ``` + + \ No newline at end of file diff --git a/en/application-dev/ability-deprecated/stage-serviceextension.md b/en/application-dev/ability-deprecated/stage-serviceextension.md index aee8f9c8116dffb49956a2bb9a1cad2ad263a166..8f77e3251d56ff8023d8215546a38b0614f5c8b3 100644 --- a/en/application-dev/ability-deprecated/stage-serviceextension.md +++ b/en/application-dev/ability-deprecated/stage-serviceextension.md @@ -33,8 +33,8 @@ OpenHarmony does not support creation of a Service Extension ability for third-p "icon": "$media:icon", "description": "service", "type": "service", - "visible": true, - "srcEntrance": "./ets/ServiceExtAbility/ServiceExtAbility.ts" + "exported": true, + "srcEntry": "./ets/ServiceExtAbility/ServiceExtAbility.ts" }] ``` diff --git a/en/application-dev/application-dev-guide-for-gitee.md b/en/application-dev/application-dev-guide-for-gitee.md index ca206e65fd11a48631e950f26c1c9b656f298e13..1ad5989d2cf8258c46e219a239a2c8c5a1d1274c 100644 --- a/en/application-dev/application-dev-guide-for-gitee.md +++ b/en/application-dev/application-dev-guide-for-gitee.md @@ -24,6 +24,8 @@ First thing first, familiarize yourself with the two cornerstone frameworks in O All applications should be developed on top of these frameworks. 
Then, equip yourself for developing the key features, with the following guidelines: + +- [Web](web/web-component-overview.md) - [Notification](notification/Readme-EN.md) - [Window Manager](windowmanager/Readme-EN.md) - [WebGL](webgl/Readme-EN.md) @@ -32,6 +34,7 @@ Then, equip yourself for developing the key features, with the following guideli - [Connectivity](connectivity/Readme-EN.md) - [Telephony Service](telephony/Readme-EN.md) - [Data Management](database/Readme-EN.md) +- [File Management](file-management/Readme-EN.md) - [Task Management](task-management/Readme-EN.md) - [Device Management](device/Readme-EN.md) - [Device Usage Statistics](device-usage-statistics/Readme-EN.md) @@ -40,7 +43,6 @@ Then, equip yourself for developing the key features, with the following guideli - [Application Test](application-test/Readme-EN.md) - [IDL Specifications and User Guide](IDL/idl-guidelines.md) - [Using Native APIs in Application Projects](napi/Readme-EN.md) -- [File Management](file-management/medialibrary-overview.md) ### Tools @@ -70,3 +72,5 @@ They are organized as follows: ### Readme For details about the principles and basic information of each subsystem, see the README file in [docs/en/readme](../readme). + + \ No newline at end of file diff --git a/en/application-dev/application-dev-guide.md b/en/application-dev/application-dev-guide.md index c7b49ac56b0638e8c4ba9908582683f9c4c46d21..8170d075cf08e8258b7c8b3731661f0e4959c6aa 100644 --- a/en/application-dev/application-dev-guide.md +++ b/en/application-dev/application-dev-guide.md @@ -4,7 +4,7 @@ The application development documents provide reference for you to develop appli The documents are carefully organized as follows: -### Getting Started +## Getting Started [Here](quick-start/start-overview.md) you'll learn how to quickly get started with OpenHarmony application development. 
@@ -12,7 +12,7 @@ Browse the documents on the instructions for quickly building your first applica Check out the development fundamentals, which comprise descriptions of the package structure configuration file for OpenHarmony applications and the instructions for use of resource files. -### Development +## Development To facilitate your application development, we provide development guidelines for key features. @@ -24,14 +24,17 @@ First thing first, familiarize yourself with the two cornerstone frameworks in O All applications should be developed on top of these frameworks. Then, equip yourself for developing the key features, with the following guidelines: + +- [Web](web/web-component-overview.md) - [Notification](notification/notification-overview.md) - [Window Manager](windowmanager/window-overview.md) - [WebGL](webgl/webgl-overview.md) -- [Media](media/audio-overview.md) +- [Media](media/media-application-overview.md) - [Security](security/userauth-overview.md) - [Connectivity](connectivity/ipc-rpc-overview.md) - [Telephony Service](telephony/telephony-overview.md) -- [Data Management](database/database-mdds-overview.md) +- [Data Management](database/data-mgmt-overview.md) +- [File Management](file-management/file-management-overview.md) - [Task Management](task-management/background-task-overview.md) - [Device](device/usb-overview.md) - [Device Usage Statistics](device-usage-statistics/device-usage-statistics-overview.md) @@ -40,32 +43,29 @@ Then, equip yourself for developing the key features, with the following guideli - [Application Test](application-test/arkxtest-guidelines.md) - [OpenHarmony IDL Specifications and User Guide](IDL/idl-guidelines.md) - [Using Native APIs in Application Projects](napi/napi-guidelines.md) -- [File Management](file-management/medialibrary-overview.md) -### Tools +## Tools DevEco Studio is a high-performance integrated development environment (IDE) recommended for developing OpenHarmony applications. 
[Here](https://developer.harmonyos.com/en/docs/documentation/doc-guides/ohos-deveco-studio-overview-0000001263280421) you can learn everything about DevEco Studio, including how to use this tool to create a project and sign, debug, and run an application. -### Hands-On Tutorials +## Hands-On Tutorials To make you better understand how functions work together and jumpstart your application development projects, we provide stripped-down, real-world [samples](https://gitee.com/openharmony/applications_app_samples/blob/master/README.md) and [codelabs](https://gitee.com/openharmony/codelabs). -### API References +## API References API references encompass all components and APIs available in OpenHarmony, helping you use and integrate APIs more effectively. They are organized as follows: -- [Component Reference (TypeScript-based Declarative Development Paradigm)](reference/arkui-ts/Readme-EN.md) - -- [Component Reference (JavaScript-based Web-like Development Paradigm)](reference/arkui-js/Readme-EN.md) - -- [JS Service Widget UI Components](reference/js-service-widget-ui/Readme-EN.md) - -- [JS and TS APIs](reference/apis/js-apis-ability-dataUriUtils.md) - +- [Component Reference (TypeScript-based Declarative Development Paradigm)](reference/arkui-ts/ts-components-summary.md) +- [Component Reference (JavaScript-compatible Web-like Development Paradigm-ArkUI.Full)](reference/arkui-js/js-components-common-attributes.md) +- [Component Reference (JavaScript-compatible Web-like Development Paradigm-ArkUI.Lite)](reference/arkui-js-lite/js-framework-file.md) +- [JS Service Widget UI Components](reference/js-service-widget-ui/js-service-widget-file.md) +- [JS and TS APIs](reference/apis/development-intro.md) - Native APIs - [Standard Library](reference/native-lib/third_party_libc/musl.md) - - [Node_API](reference/native-lib/third_party_napi/napi.md) + - [Node_API](reference/native-lib/third_party_napi/napi.md) + \ No newline at end of file diff --git 
a/en/application-dev/application-models/Readme-EN.md b/en/application-dev/application-models/Readme-EN.md index 2c1505fc1c12e57a96f22fcda7faf92ba9ea7418..65f2b4c16ea42ecdf37082a5a9f8e26eb20dd6e6 100644 --- a/en/application-dev/application-models/Readme-EN.md +++ b/en/application-dev/application-models/Readme-EN.md @@ -18,11 +18,35 @@ - [ExtensionAbility Component Overview](extensionability-overview.md) - [ServiceExtensionAbility](serviceextensionability.md) - [DataShareExtensionAbility (for System Applications Only)](datashareextensionability.md) - - [FormExtensionAbility (Widget)](widget-development-stage.md) - [AccessibilityExtensionAbility](accessibilityextensionability.md) - [EnterpriseAdminExtensionAbility](enterprise-extensionAbility.md) - [InputMethodExtensionAbility](inputmethodextentionability.md) - [WindowExtensionAbility](windowextensionability.md) + - Service Widget Development in Stage Model + - [Service Widget Overview](service-widget-overview.md) + - Developing an ArkTS Widget + - [ArkTS Widget Working Principles](arkts-ui-widget-working-principles.md) + - [ArkTS Widget Related Modules](arkts-ui-widget-modules.md) + - ArkTS Widget Development + - [Creating an ArkTS Widget](arkts-ui-widget-creation.md) + - [Configuring Widget Configuration Files](arkts-ui-widget-configuration.md) + - [Widget Lifecycle Management](arkts-ui-widget-lifecycle.md) + - Widget Page Development + - [Widget Page Capability Overview](arkts-ui-widget-page-overview.md) + - [Using Animations in the Widget](arkts-ui-widget-page-animation.md) + - [Applying Custom Drawing in the Widget](arkts-ui-widget-page-custom-drawing.md) + - Widget Event Development + - [Widget Event Capability Overview](arkts-ui-widget-event-overview.md) + - [Updating Widget Content Through FormExtensionAbility](arkts-ui-widget-event-formextensionability.md) + - [Updating Widget Content Through UIAbility](arkts-ui-widget-event-uiability.md) + - [Redirecting to a Specified Page Through the Router 
Event](arkts-ui-widget-event-router.md) + - Widget Data Interaction + - [Widget Data Interaction Overview](arkts-ui-widget-interaction-overview.md) + - [Configuring a Widget to Update Periodically](arkts-ui-widget-update-by-time.md) + - [Updating Local and Online Images in the Widget](arkts-ui-widget-image-update.md) + - [Updating Widget Content by State](arkts-ui-widget-update-by-status.md) + - [Updating Widget Content by Widget Host (for System Applications Only)](arkts-ui-widget-content-update.md) + - [Developing a JS Widget](js-ui-widget-development.md) - [AbilityStage Component Container](abilitystage.md) - [Context](application-context-stage.md) - Want @@ -46,8 +70,9 @@ - [Common Event Subscription Overview](common-event-subscription-overview.md) - [Subscribing to Common Events in Dynamic Mode](common-event-subscription.md) - [Subscribing to Common Events in Static Mode (for System Applications Only)](common-event-static-subscription.md) - - [Unsubscribing from Common Events](common-event-unsubscription.md) + - [Unsubscribing from Common Events](common-event-unsubscription.md) - [Publishing Common Events](common-event-publish.md) + - [Removing Sticky Common Events](common-event-remove-sticky.md) - [Background Services](background-services.md) - Inter-Thread Communication - [Thread Model](thread-model-stage.md) diff --git a/en/application-dev/application-models/ability-startup-with-implicit-want.md b/en/application-dev/application-models/ability-startup-with-implicit-want.md index 231610ad52a5ff53ce75b96277a18e4430c65d87..dbd65bb560d7531bb6e00b21c004815fda1a997c 100644 --- a/en/application-dev/application-models/ability-startup-with-implicit-want.md +++ b/en/application-dev/application-models/ability-startup-with-implicit-want.md @@ -27,8 +27,7 @@ This section uses the operation of using a browser to open a website as an examp "host": "www.test.com", "port": "8080", // Prefix matching is used. 
- "pathStartWith": "query", - "type": "text/*" + "pathStartWith": "query" }, { "scheme": "http", @@ -53,12 +52,11 @@ function implicitStartAbility() { let context = getContext(this) as common.UIAbilityContext; let wantInfo = { // Uncomment the line below if you want to implicitly query data only in the specific bundle. - // bundleName: "com.example.myapplication", - "action": "ohos.want.action.viewData", + // bundleName: 'com.example.myapplication', + 'action': 'ohos.want.action.viewData', // entities can be omitted. - "entities": ["entity.system.browsable"], - "uri": "https://www.test.com:8080/query/student", - "type": "text/plain" + 'entities': ['entity.system.browsable'], + 'uri': 'https://www.test.com:8080/query/student' } context.startAbility(wantInfo).then(() => { // ... @@ -75,6 +73,6 @@ The matching process is as follows: 3. If **uri** in the passed **want** parameter is included in **uris** under **skills** of the ability to match, which is concatenated into https://www.test.com:8080/query* (where * is a wildcard), the matching is successful. 4. If **type** in the passed **want** parameter is specified and is included in **type** under **skills** of the ability to match, the matching is successful. -When there are multiple matching applications, a dialog box is displayed for you to select one of them. The following figure shows an example. +If there are multiple matching applications, the system displays a dialog box for you to select one of them. The following figure shows an example. 
![](figures/ability-startup-with-implicit-want1.png) \ No newline at end of file diff --git a/en/application-dev/application-models/abilitystage.md b/en/application-dev/application-models/abilitystage.md index 9a4e71d3fa696ee6f2707545b80456df34fe85ac..769c6b4540856a553ca30f02c0a689e1c32f2307 100644 --- a/en/application-dev/application-models/abilitystage.md +++ b/en/application-dev/application-models/abilitystage.md @@ -12,7 +12,7 @@ AbilityStage is not automatically generated in the default project of DevEco Stu 1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **myabilitystage**. -2. In the **myabilitystage** directory, right-click and choose **New > ts File** to create a file named **MyAbilityStage.ts**. +2. In the **myabilitystage** directory, right-click and choose **New > TypeScript File** to create a file named **MyAbilityStage.ts**. 3. Open the **MyAbilityStage.ts** file, and import the dependency package of AbilityStage. Customize a class that inherits from AbilityStage, and add the required lifecycle callbacks. The following code snippet adds the **onCreate()** lifecycle callback. @@ -20,17 +20,17 @@ AbilityStage is not automatically generated in the default project of DevEco Stu import AbilityStage from '@ohos.app.ability.AbilityStage'; export default class MyAbilityStage extends AbilityStage { - onCreate() { - // When the HAP of the application is loaded for the first time, initialize the module. - } - onAcceptWant(want) { - // Triggered only for the ability with the specified launch type. - return "MyAbilityStage"; - } + onCreate() { + // When the HAP of the application is loaded for the first time, initialize the module. + } + onAcceptWant(want) { + // Triggered only for the ability with the specified launch type. + return "MyAbilityStage"; + } } ``` -4. Set **srcEntry** in the [module.json5 file](../quick-start/module-configuration-file.md) to the code path of the module. +4. 
In the [module.json5 file](../quick-start/module-configuration-file.md), set **srcEntry** to specify the code path of the module as the entry for loading the HAP. ```json { "module": { @@ -42,7 +42,6 @@ AbilityStage is not automatically generated in the default project of DevEco Stu } ``` - [AbilityStage](../reference/apis/js-apis-app-ability-abilityStage.md) has the lifecycle callback [onCreate()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageoncreate) and the event callbacks [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant), [onConfigurationUpdated()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonconfigurationupdate), and [onMemoryLevel()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonmemorylevel). @@ -53,7 +52,6 @@ AbilityStage is not automatically generated in the default project of DevEco Stu - **onConfigurationUpdated()** event callback: triggered when the global system configuration changes. The global system configuration, such as the system language and theme, are defined in the [Configuration](../reference/apis/js-apis-app-ability-configuration.md) class before project configuration. - **onMemoryLevel()** event callback: triggered when the system adjusts the memory. - When an application is switched to the background, it is cached in the background. This adversely affects the overall system performance. When system resources are insufficient, the system reclaims memory from applications in multiple ways. For example, the system may stop applications to release memory for executing key tasks. To further maintain the balance of the system memory and prevent the system from stopping application processes, you can subscribe to the system memory changes in the **onMemoryLevel()** lifecycle callback of AbilityStage to release unnecessary resources. 
@@ -62,8 +60,8 @@ When an application is switched to the background, it is cached in the backgroun import AbilityStage from '@ohos.app.ability.AbilityStage'; export default class MyAbilityStage extends AbilityStage { - onMemoryLevel(level) { - // Release unnecessary memory based on the change of available system memory. - } + onMemoryLevel(level) { + // Release unnecessary memory based on the change of available system memory. + } } ``` diff --git a/en/application-dev/application-models/application-context-stage.md b/en/application-dev/application-models/application-context-stage.md index de07a3600a27b619f144a4f22223e17616f80805..cc19530d99ca4bf2005fcb4b5084c9e83b445193 100644 --- a/en/application-dev/application-models/application-context-stage.md +++ b/en/application-dev/application-models/application-context-stage.md @@ -19,10 +19,10 @@ ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let uiAbilityContext = this.context; - // ... - } + onCreate(want, launchParam) { + let uiAbilityContext = this.context; + // ... + } } ``` @@ -34,21 +34,21 @@ ```ts import ServiceExtensionAbility from '@ohos.app.ability.ServiceExtensionAbility'; export default class MyService extends ServiceExtensionAbility { - onCreate(want) { - let serviceExtensionContext = this.context; - // ... - } + onCreate(want) { + let serviceExtensionContext = this.context; + // ... + } } ``` - [AbilityStageContext](../reference/apis/js-apis-inner-application-abilityStageContext.md): module-level context. It provides **HapModuleInfo** and **Configuration** in addition to those provided by the base class **Context**. ```ts - import AbilityStage from "@ohos.app.ability.AbilityStage"; + import AbilityStage from '@ohos.app.ability.AbilityStage'; export default class MyAbilityStage extends AbilityStage { - onCreate() { - let abilityStageContext = this.context; - // ... 
- } + onCreate() { + let abilityStageContext = this.context; + // ... + } } ``` - [ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md): application-level context. It provides APIs for subscribing to application component lifecycle changes, system memory changes, and system environment changes. The application-level context can be obtained from UIAbility, ExtensionAbility, and AbilityStage. @@ -56,10 +56,10 @@ ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let applicationContext = this.context.getApplicationContext(); - // ... - } + onCreate(want, launchParam) { + let applicationContext = this.context.getApplicationContext(); + // ... + } } ``` @@ -71,7 +71,7 @@ This topic describes how to use the context in the following scenarios: - [Obtaining the Application Development Path](#obtaining-the-application-development-path) -- [Obtaining and Modifying Encrypted Areas](#obtaining-and-modifying-encrypted-areas) +- [Obtaining and Modifying Encryption Areas](#obtaining-and-modifying-encryption-areas) - [Creating Context of Another Application or Module](#creating-context-of-another-application-or-module) - [Subscribing to UIAbility Lifecycle Changes in a Process](#subscribing-to-uiability-lifecycle-changes-in-a-process) @@ -84,13 +84,13 @@ The following table describes the application development paths obtained from co | Name| Type| Readable| Writable| Description| | -------- | -------- | -------- | -------- | -------- | -| cacheDir | string | Yes| No| Cache directory of the application on the internal storage.
It is the content of **Storage** of an application under **Settings > Apps & services > Apps**.| -| tempDir | string | Yes| No| Temporary file directory of the application.
Files in this directory are deleted after the application is uninstalled.| -| filesDir | string | Yes| No| File directory of the application on the internal storage.
Files in this directory may be synchronized to other directories during application migration or backup.| -| databaseDir | string | Yes| No| Storage directory of the local database.| -| bundleCodeDir | string | Yes| No| Installation directory of the application on the internal storage.| -| distributedFilesDir | string | Yes| No| Storage directory of distributed application data files.| -| preferencesDir | string | Yes| Yes| Preferences directory of the application.| +| bundleCodeDir | string | Yes | No | Path for storing the application's installation package, that is, installation directory of the application on the internal storage. Do not access resource files by concatenating paths. Use [@ohos.resourceManager] instead. | +| cacheDir | string | Yes| No| Path for storing the application's cache files, that is, cache directory of the application on the internal storage.
It is the content of **Storage** of an application under **Settings > Apps & services > Apps**.| +| filesDir | string | Yes | No | Path for storing the application's common files, that is, file directory of the application on the internal storage.
Files in this directory may be synchronized to other directories during application migration or backup.| +| preferencesDir | string | Yes | Yes | Path for storing the application's preference files, that is, preferences directory of the application. | +| tempDir | string | Yes | No | Path for storing the application's temporary files.
Files in this directory are deleted after the application is uninstalled.| +| databaseDir | string | Yes | No | Path for storing the application's database, that is, storage directory of the local database. | +| distributedFilesDir | string | Yes| No| Path for storing the application's distributed files.| The capability of obtaining the application development path is provided by the base class **Context**. This capability is also provided by **ApplicationContext**, **AbilityStageContext**, **UIAbilityContext**, and **ExtensionContext**. However, the paths obtained from different contexts may differ, as shown below. @@ -127,16 +127,16 @@ The sample code for obtaining the application development paths is as follows: import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let cacheDir = this.context.cacheDir; - let tempDir = this.context.tempDir; - let filesDir = this.context.filesDir; - let databaseDir = this.context.databaseDir; - let bundleCodeDir = this.context.bundleCodeDir; - let distributedFilesDir = this.context.distributedFilesDir; - let preferencesDir = this.context.preferencesDir; - // ... - } + onCreate(want, launchParam) { + let cacheDir = this.context.cacheDir; + let tempDir = this.context.tempDir; + let filesDir = this.context.filesDir; + let databaseDir = this.context.databaseDir; + let bundleCodeDir = this.context.bundleCodeDir; + let distributedFilesDir = this.context.distributedFilesDir; + let preferencesDir = this.context.preferencesDir; + // ... + } } ``` @@ -144,59 +144,66 @@ export default class EntryAbility extends UIAbility { > > The sample code obtains the sandbox path of the application development path. The absolute path can be obtained by running the **find / -name ** command in the hdc shell after file creation or modification. 
-### Obtaining and Modifying Encrypted Areas +### Obtaining and Modifying Encryption Areas -You can read and write [the area attribute in the context](../reference/apis/js-apis-inner-application-context.md) to obtain and set an encrypted area. Two encryption levels are supported: +Encrypting application files enhances data security by preventing files from unauthorized access. Different application files require different levels of protection. For private files, such as alarms and wallpapers, the application must place them in the device-level encryption area (EL1) to ensure that they can be accessed before the user enters the password. For sensitive files, such as personal privacy data, the application must place them in the user-level encryption area (EL2). -- AreaMode.EL1: device-level encryption area, which is accessible after the device is powered on. +In practice, you need to select a proper encrypted area based on scenario-specific requirements to protect application data security. The proper use of EL1 and the EL2 can efficiently improve the security. -- AreaMode.EL2: user-level encryption area, which is accessible only after the device is powered on and the password is entered (for the first time). +> **NOTE** +> +> - AreaMode.EL1: device-level encryption area, which is accessible after the device is powered on. +> +> - AreaMode.EL2: user-level encryption area, which is accessible only after the device is powered on and the password is entered (for the first time). + +You can obtain and set the encryption area by reading and writing the [area attribute in Context](../reference/apis/js-apis-inner-application-context.md). ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - // Before storing common information, switch the encryption level to EL1. - if (this.context.area === 1) {// Obtain the area. - this.context.area = 0; // Modify the area. 
- } - // Store common information. - - // Before storing sensitive information, switch the encryption level to EL2. - if (this.context.area === 0) { // Obtain the area. - this.context.area = 1; // Modify the area. - } - // Store sensitive information. + onCreate(want, launchParam) { + // Before storing common information, switch the encryption level to EL1. + if (this.context.area === 1) {// Obtain the area. + this.context.area = 0; // Modify the area. + } + // Store common information. + + // Before storing sensitive information, switch the encryption level to EL2. + if (this.context.area === 0) { // Obtain the area. + this.context.area = 1; // Modify the area. } + // Store sensitive information. + } } ``` ### Creating Context of Another Application or Module -The base class **Context** provides the [createBundleContext(bundleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatebundlecontext), [createModuleContext(moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext), and [createModuleContext(bundleName:string, moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext-1) methods for creating the context of other applications or modules, so as to obtain the resource information, for example, [obtaining the application development paths](#obtaining-the-application-development-path) of other modules. 
+The base class **Context** provides [createBundleContext(bundleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatebundlecontext), [createModuleContext(moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext), and [createModuleContext(bundleName:string, moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext-1) to create the context of other applications or modules, so as to obtain the resource information, for example, [obtaining the application development paths](#obtaining-the-application-development-path) of other modules. - Call **createBundleContext(bundleName:string)** to create the context of another application. > **NOTE** > > To obtain the context of another application: - > - > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + > + > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + > > - This is a system API and cannot be called by third-party applications. For example, application information displayed on the home screen includes the application name and icon. The home screen application calls the foregoing method to obtain the context information, so as to obtain the resource information including the application name and icon. - + ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let bundleName2 = "com.example.application"; - let context2 = this.context.createBundleContext(bundleName2); - let label2 = context2.applicationInfo.label; - // ... 
- } + onCreate(want, launchParam) { + let bundleName2 = 'com.example.application'; + let context2 = this.context.createBundleContext(bundleName2); + let label2 = context2.applicationInfo.label; + // ... + } } ``` @@ -205,99 +212,113 @@ The base class **Context** provides the [createBundleContext(bundleName:string)] > > To obtain the context of a specified module of another application: > - > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + > > - This is a system API and cannot be called by third-party applications. - + ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let bundleName2 = "com.example.application"; - let moduleName2 = "module1"; - let context2 = this.context.createModuleContext(bundleName2, moduleName2); - // ... - } + onCreate(want, launchParam) { + let bundleName2 = 'com.example.application'; + let moduleName2 = 'module1'; + let context2 = this.context.createModuleContext(bundleName2, moduleName2); + // ... + } } ``` - + - Call **createModuleContext(moduleName:string)** to obtain the context of another module in the current application. After obtaining the context, you can obtain the resource information of that module. ```ts import UIAbility from '@ohos.app.ability.UIAbility'; export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - let moduleName2 = "module1"; - let context2 = this.context.createModuleContext(moduleName2); - // ... 
- } + onCreate(want, launchParam) { + let moduleName2 = 'module1'; + let context2 = this.context.createModuleContext(moduleName2); + // ... + } } ``` ### Subscribing to UIAbility Lifecycle Changes in a Process -In the DFX statistics scenario of an application, if you need to collect statistics on the stay duration and access frequency of a page, you can subscribe to UIAbility lifecycle changes. +In the DFX statistics scenario of an application, if you need to collect statistics on the stay duration and access frequency of a page, you can subscribe to UIAbility lifecycle changes in a process. -[ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md) provides APIs for subscribing to UIAbility lifecycle changes in a process. When the UIAbility lifecycle changes in a process, for example, being created or destroyed, becoming visible or invisible, or gaining or losing focus, the corresponding callback is triggered, and a listener ID is returned. The ID is incremented by 1 each time the listener is registered. When the number of listeners exceeds the upper limit (2^63-1), -1 is returned. The following uses [UIAbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md) as an example. +[ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md) provides APIs for subscribing to UIAbility lifecycle changes in a process. When the UIAbility lifecycle changes in a process, for example, being created or destroyed, becoming visible or invisible, or gaining or losing focus, the corresponding callback is triggered. Each time the callback is registered, a listener lifecycle ID is returned, with the value incremented by 1 each time. When the number of listeners exceeds the upper limit (2^63-1), **-1** is returned. The following uses [UIAbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md) as an example. 
```ts import UIAbility from '@ohos.app.ability.UIAbility'; import window from '@ohos.window'; -const TAG: string = "[Example].[Entry].[EntryAbility]"; +const TAG: string = '[Example].[Entry].[EntryAbility]'; export default class EntryAbility extends UIAbility { - lifecycleId: number; - - onCreate(want, launchParam) { - let abilityLifecycleCallback = { - onAbilityCreate(uiability) { - console.info(TAG, "onAbilityCreate uiability:" + JSON.stringify(uiability)); - }, - onWindowStageCreate(uiability, windowStage) { - console.info(TAG, "onWindowStageCreate uiability:" + JSON.stringify(uiability)); - console.info(TAG, "onWindowStageCreate windowStage:" + JSON.stringify(windowStage)); - }, - onWindowStageActive(uiability, windowStage) { - console.info(TAG, "onWindowStageActive uiability:" + JSON.stringify(uiability)); - console.info(TAG, "onWindowStageActive windowStage:" + JSON.stringify(windowStage)); - }, - onWindowStageInactive(uiability, windowStage) { - console.info(TAG, "onWindowStageInactive uiability:" + JSON.stringify(uiability)); - console.info(TAG, "onWindowStageInactive windowStage:" + JSON.stringify(windowStage)); - }, - onWindowStageDestroy(uiability, windowStage) { - console.info(TAG, "onWindowStageDestroy uiability:" + JSON.stringify(uiability)); - console.info(TAG, "onWindowStageDestroy windowStage:" + JSON.stringify(windowStage)); - }, - onAbilityDestroy(uiability) { - console.info(TAG, "onAbilityDestroy uiability:" + JSON.stringify(uiability)); - }, - onAbilityForeground(uiability) { - console.info(TAG, "onAbilityForeground uiability:" + JSON.stringify(uiability)); - }, - onAbilityBackground(uiability) { - console.info(TAG, "onAbilityBackground uiability:" + JSON.stringify(uiability)); - }, - onAbilityContinue(uiability) { - console.info(TAG, "onAbilityContinue uiability:" + JSON.stringify(uiability)); - } - } - // 1. Obtain the application context through the context attribute. - let applicationContext = this.context.getApplicationContext(); - // 2. 
Register a listener for the lifecycle changes through the application context. - this.lifecycleId = applicationContext.on("abilityLifecycle", abilityLifecycleCallback); - console.info(TAG, "register callback number: " + JSON.stringify(this.lifecycleId)); + // Define a lifecycle ID. + lifecycleId: number; + + onCreate(want, launchParam) { + // Define a lifecycle callback object. + let abilityLifecycleCallback = { + // Called when a UIAbility is created. + onAbilityCreate(uiAbility) { + console.log(TAG, `onAbilityCreate uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + }, + // Called when a window is created. + onWindowStageCreate(uiAbility, windowStage: window.WindowStage) { + console.log(TAG, `onWindowStageCreate uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + console.log(TAG, `onWindowStageCreate windowStage: ${JSON.stringify(windowStage)}`); + }, + // Called when the window becomes active. + onWindowStageActive(uiAbility, windowStage: window.WindowStage) { + console.log(TAG, `onWindowStageActive uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + console.log(TAG, `onWindowStageActive windowStage: ${JSON.stringify(windowStage)}`); + }, + // Called when the window becomes inactive. + onWindowStageInactive(uiAbility, windowStage: window.WindowStage) { + console.log(TAG, `onWindowStageInactive uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + console.log(TAG, `onWindowStageInactive windowStage: ${JSON.stringify(windowStage)}`); + }, + // Called when the window is destroyed. + onWindowStageDestroy(uiAbility, windowStage: window.WindowStage) { + console.log(TAG, `onWindowStageDestroy uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + console.log(TAG, `onWindowStageDestroy windowStage: ${JSON.stringify(windowStage)}`); + }, + // Called when the UIAbility is destroyed. 
+ onAbilityDestroy(uiAbility) { + console.log(TAG, `onAbilityDestroy uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + }, + // Called when the UIAbility is switched from the background to the foreground. + onAbilityForeground(uiAbility) { + console.log(TAG, `onAbilityForeground uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + }, + // Called when the UIAbility is switched from the foreground to the background. + onAbilityBackground(uiAbility) { + console.log(TAG, `onAbilityBackground uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + }, + // Called when UIAbility is continued on another device. + onAbilityContinue(uiAbility) { + console.log(TAG, `onAbilityContinue uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`); + } } + // Obtain the application context. + let applicationContext = this.context.getApplicationContext(); + // Register the application lifecycle callback. + this.lifecycleId = applicationContext.on('abilityLifecycle', abilityLifecycleCallback); + console.log(TAG, `register callback number: ${this.lifecycleId}`); + } - onDestroy() { - let applicationContext = this.context.getApplicationContext(); - applicationContext.off("abilityLifecycle", this.lifecycleId, (error, data) => { - console.info(TAG, "unregister callback success, err: " + JSON.stringify(error)); - }); - } + // ... + + onDestroy() { + // Obtain the application context. + let applicationContext = this.context.getApplicationContext(); + // Deregister the application lifecycle callback. 
+ applicationContext.off('abilityLifecycle', this.lifecycleId); + } } ``` diff --git a/en/application-dev/application-models/arkts-ui-widget-configuration.md b/en/application-dev/application-models/arkts-ui-widget-configuration.md new file mode 100644 index 0000000000000000000000000000000000000000..7e438ce4215a583c4ad7ccebc8cfc591b5251ad6 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-configuration.md @@ -0,0 +1,84 @@ +# Configuring Widget Configuration Files + + +Widget-related configuration includes **FormExtensionAbility** configuration and widget configuration. + + +1. Configure FormExtensionAbility information under **extensionAbilities** in the [module.json5 file](../quick-start/module-configuration-file.md). For a FormExtensionAbility, you must specify **metadata**. Specifically, set **name** to **ohos.extension.form** (fixed), and set **resource** to the index of the widget configuration information. + + Example configuration: + + + ```json + { + "module": { + ... + "extensionAbilities": [ + { + "name": "EntryFormAbility", + "srcEntry": "./ets/entryformability/EntryFormAbility.ts", + "label": "$string:EntryFormAbility_label", + "description": "$string:EntryFormAbility_desc", + "type": "form", + "metadata": [ + { + "name": "ohos.extension.form", + "resource": "$profile:form_config" + } + ] + } + ] + } + } + ``` + +2. Configure the widget configuration information. In the **metadata** configuration item of FormExtensionAbility, you can specify the resource index of specific configuration information of the widget. For example, if resource is set to **$profile:form_config**, **form_config.json** in the **resources/base/profile/** directory of the development view is used as the profile configuration file of the widget. The following table describes the internal field structure. 
+ + **Table 1** form_config.json file + + | Field| Description| Data Type| Default Value Allowed| + | -------- | -------- | -------- | -------- | + | name | Class name of the widget. The value is a string with a maximum of 127 bytes.| String| No| + | description | Description of the widget. The value can be a string or a resource index to descriptions in multiple languages. The value is a string with a maximum of 255 bytes.| String| Yes (initial value: left empty)| + | src | Full path of the UI code corresponding to the widget. For an ArkTS widget, the full path must contain the widget file name extension, for example, **./ets/widget/pages/WidgetCard.ets**. For a JS widget, the full path does not need to contain the widget file name extension, for example, **./js/widget/pages/WidgetCard**.| String| No| + | uiSyntax | Type of the widget.
- **arkts**: ArkTS widget
- **hml**: JS widget| String| Yes (initial value: **hml**)| + | window | Window-related configurations.| Object| Yes| + | isDefault | Whether the widget is a default one. Each UIAbility has only one default widget.
- **true**: The widget is the default one.
- **false**: The widget is not the default one.| Boolean| No| + | colorMode | Color mode of the widget.
- **auto**: auto-adaptive color mode
- **dark**: dark color mode
- **light**: light color mode| String| Yes (initial value: **auto**)| + | supportDimensions | Grid styles supported by the widget.
- **1 * 2**: indicates a grid with one row and two columns.
- **2 * 2**: indicates a grid with two rows and two columns.
- **2 * 4**: indicates a grid with two rows and four columns.
- **4 * 4**: indicates a grid with four rows and four columns.| String array| No| + | defaultDimension | Default grid style of the widget. The value must be available in the **supportDimensions** array of the widget.| String| No| + | updateEnabled | Whether the widget can be updated periodically.
- **true**: The widget can be updated at a specified interval (**updateDuration**) or at the scheduled time (**scheduledUpdateTime**). **updateDuration** takes precedence over **scheduledUpdateTime**.
- **false**: The widget cannot be updated periodically.| Boolean| No| + | scheduledUpdateTime | Scheduled time to update the widget. The value is in 24-hour format and accurate to minute.
**NOTE**
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| String| Yes (initial value: The widget cannot be updated periodically.)| + | updateDuration | Interval to update the widget. The value is a natural number, in the unit of 30 minutes.
If the value is **0**, this field does not take effect.
If the value is a positive integer *N*, the interval is calculated by multiplying *N* and 30 minutes.
**NOTE**
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| Number| Yes (initial value: **0**)| + | formConfigAbility | Link to a specific page of the application. The value is a URI.| String| Yes (initial value: left empty)| + | formVisibleNotify | Whether the widget is allowed to use the widget visibility notification.| String| Yes (initial value: left empty)| + | metadata | Metadata of the widget. This field contains the array of the **customizeData** field.| Object| Yes (initial value: left empty)| + + Example configuration: + + + ```json + { + "forms": [ + { + "name": "widget", + "description": "This is a service widget.", + "src": "./ets/widget/pages/WidgetCard.ets", + "uiSyntax": "arkts", + "window": { + "designWidth": 720, + "autoDesignWidth": true + }, + "colorMode": "auto", + "isDefault": true, + "updateEnabled": true, + "scheduledUpdateTime": "10:30", + "updateDuration": 1, + "defaultDimension": "2*2", + "supportDimensions": [ + "2*2" + ] + } + ] + } + ``` diff --git a/en/application-dev/application-models/arkts-ui-widget-content-update.md b/en/application-dev/application-models/arkts-ui-widget-content-update.md new file mode 100644 index 0000000000000000000000000000000000000000..c0f4c840b6c11b497405ce8777a04317b5ffca4d --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-content-update.md @@ -0,0 +1,30 @@ +# Updating Widget Content by Widget Host (for System Applications Only) + + +Widgets that are updated periodically are subject to the scheduled time or interval settings. To offer more flexible updates, the widget host can provide a button to proactively trigger a widget update. Specifically, the widget host calls the [requestForm](../reference/apis/js-apis-app-form-formHost.md#requestform) API to request a widget update. 
The system then calls the [onUpdateForm](../reference/apis/js-apis-app-form-formExtensionAbility.md#onupdateform) lifecycle callback in the FormExtensionAbility of the widget provider. In the callback, the [updateForm](../reference/apis/js-apis-app-form-formProvider.md#updateform) API can be used to update the widget content. For details about the **onUpdateForm** lifecycle callback, see [Updating Widget Content Through FormExtensionAbility](arkts-ui-widget-event-formextensionability.md). + +```ts +import formHost from '@ohos.app.form.formHost'; + +@Entry() +@Component +struct WidgetCard { + formId = ...; // Widget ID + + build() { + Button (`Update Widget`) + .type(ButtonType.Capsule) + .width('50%') + .height(50) + .onClick(() => { + console.info('FormAbility update form click'); + // formId is the ID of the widget to be updated. + formHost.requestForm(this.formId.toString()).then(() => { + console.info('Succeeded in requestForming.'); + }); + }) + + ... + } +} +``` diff --git a/en/application-dev/application-models/arkts-ui-widget-creation.md b/en/application-dev/application-models/arkts-ui-widget-creation.md new file mode 100644 index 0000000000000000000000000000000000000000..cc8843c0294cf745e7737164e46cb8f69256efa2 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-creation.md @@ -0,0 +1,19 @@ +# Creating an ArkTS Widget + +To create an ArkTS widget in an existing application project, perform the following steps: + +1. Create a widget. + + ![WidgetProjectCreate1](figures/WidgetProjectCreate1.png) + +2. Select a widget template based on the actual service scenario. + + ![WidgetProjectCreate2](figures/WidgetProjectCreate2.png) + +3. Set **Language** to **ArkTS** and click **Finish**. 
+ + ![WidgetProjectCreate3](figures/WidgetProjectCreate3.png) + +After an ArkTS widget is created, the following widget-related files are added to the project directory: **EntryFormAbility.ts** (widget lifecycle management file), **WidgetCard.ets** (widget page file), and **form_config.json** (widget configuration file). + +![WidgetProjectView](figures/WidgetProjectView.png) diff --git a/en/application-dev/application-models/arkts-ui-widget-event-formextensionability.md b/en/application-dev/application-models/arkts-ui-widget-event-formextensionability.md new file mode 100644 index 0000000000000000000000000000000000000000..861f5ca66eea9a06ee50c7b1448e1f6ed040c01a --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-event-formextensionability.md @@ -0,0 +1,66 @@ +# Updating Widget Content Through FormExtensionAbility + + +On the widget page, the **postCardAction** API can be used to trigger a message event to the FormExtensionAbility, which then updates the widget content. The following is an example of this widget update mode. + + +- On the widget page, register the **onClick** event callback of the button and call the **postCardAction** API in the callback to trigger the event to the FormExtensionAbility. + + ```ts + let storage = new LocalStorage(); + @Entry(storage) + @Component + struct WidgetCard { + @LocalStorageProp('title') title: string = 'init'; + @LocalStorageProp('detail') detail: string = 'init'; + + build() { + Column() { + Button ('Update') + .onClick(() => { + postCardAction(this, { + 'action': 'message', + 'params': { + 'msgTest': 'messageEvent' + } + }); + }) + Text(`${this.title}`) + Text(`${this.detail}`) + } + .width('100%') + .height('100%') + } + } + ``` + +- Call the [updateForm](../reference/apis/js-apis-app-form-formProvider.md#updateform) API to update the widget in the **onFormEvent** callback of the FormExtensionAbility. 
+ + ```ts + import formBindingData from '@ohos.app.form.formBindingData'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import formProvider from '@ohos.app.form.formProvider'; + + export default class EntryFormAbility extends FormExtensionAbility { + onFormEvent(formId, message) { + // Called when a specified message event defined by the form provider is triggered. + console.info(`FormAbility onEvent, formId = ${formId}, message: ${JSON.stringify(message)}`); + let formData = { + 'title':'Title Update Success.', // Matches the widget layout. + 'detail':'Detail Update Success.', // Matches the widget layout. + }; + let formInfo = formBindingData.createFormBindingData(formData) + formProvider.updateForm(formId, formInfo).then((data) => { + console.info('FormAbility updateForm success.' + JSON.stringify(data)); + }).catch((error) => { + console.error('FormAbility updateForm failed: ' + JSON.stringify(error)); + }) + } + + // ... + } + ``` + + The figure below shows the effect. + + ![WidgetUpdatePage](figures/WidgetUpdatePage.png) diff --git a/en/application-dev/application-models/arkts-ui-widget-event-overview.md b/en/application-dev/application-models/arkts-ui-widget-event-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..fbc77b97a27b52b0f7b2a3b0cebc5b5cb5940f72 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-event-overview.md @@ -0,0 +1,62 @@ +# Widget Event Capability Overview + + +The ArkTS widget provides the **postCardAction()** API for interaction between the widget internal and the provider application. Currently, this API supports the router, message, and call events and can be called only in the widget. 
+ + +![WidgetPostCardAction](figures/WidgetPostCardAction.png) + + +Definition: postCardAction(component: Object, action: Object): void + + +Parameters: + + +| Name| Type| Mandatory| Description| +| -------- | -------- | -------- | -------- | +| component | Object | Yes| Instance of the current custom component. Generally, **this** is transferred.| +| action | Object | Yes| Action description. For details, see the following table.| + + +Description of the action parameter + + +| **Key** | **Value** | Description| +| -------- | -------- | -------- | +| "action" | string | Action type.
- **"router"**: application redirection. If this type of action is triggered, the corresponding UIAbility is displayed. Only the UIAbility of the current application can be displayed.
- **"message"**: custom message. If this type of action is triggered, the [onFormEvent()](../reference/apis/js-apis-app-form-formExtensionAbility.md#onformevent) lifecycle callback of the provider FormExtensionAbility is called.
- **"call"**: application startup in the background. If this type of action is triggered, the corresponding UIAbility is started but does not run in the foreground. The target application must have the permission to run in the background ([ohos.permission.KEEP_BACKGROUND_RUNNING](../security/permission-list.md#ohospermissionkeep_background_running)).| +| "bundleName" | string | Name of the target bundle when **action** is **"router"** or **"call"**. This parameter is optional.| +| "moduleName" | string | Name of the target module when **action** is **"router"** or **"call"**. This parameter is optional.| +| "abilityName" | string | Name of the target UIAbility when **action** is **"router"** or **"call"**. This parameter is mandatory.| +| "params" | Object | Additional parameters carried in the current action. The value is a key-value pair in JSON format.| + + +Sample code of the **postCardAction()** API: + + + +```typescript +Button ('Jump') + .width('40%') + .height('20%') + .onClick(() => { + postCardAction(this, { + 'action': 'router', + 'bundleName': 'com.example.myapplication', + 'abilityName': 'EntryAbility', + 'params': { + 'message': 'testForRouter' // Customize the message to be sent. 
+ } + }); + }) +``` + + +The following are typical widget development scenarios that can be implemented through widget events: + + +- [Updating Widget Content Through FormExtensionAbility](arkts-ui-widget-event-formextensionability.md) + +- [Updating Widget Content Through UIAbility](arkts-ui-widget-event-uiability.md) + +- [Redirecting to a Specified Page Through the Router Event](arkts-ui-widget-event-router.md) diff --git a/en/application-dev/application-models/arkts-ui-widget-event-router.md b/en/application-dev/application-models/arkts-ui-widget-event-router.md new file mode 100644 index 0000000000000000000000000000000000000000..371cbc6b2729a7985ed2fd183297ed771fddb11d --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-event-router.md @@ -0,0 +1,110 @@ +# Redirecting to a Specified Page Through the Router Event + + +The **router** capability of the **postCardAction** API can be used in a widget to quickly start the widget provider application. An application can provide different buttons through the widget so that users can jump to different pages at the touch of a button. For example, a camera widget provides the buttons that direct the user to respective pages, such as the page for taking a photo and the page for recording a video. + + +![WidgerCameraCard](figures/WidgerCameraCard.png) + + +Generally, a button is used to start a page. + + +- Design two buttons on the widget page. When one of the buttons is clicked, **postCardAction** is called to send a router event to the specified UIAbility, with the content to be transferred defined in the event. + + ```ts + @Entry + @Component + struct WidgetCard { + build() { + Column() { + Button ('Function A') + .margin('20%') + .onClick(() => { + console.info('Jump to EntryAbility funA'); + postCardAction(this, { + 'action': 'router', + 'abilityName': 'EntryAbility', // Only the UIAbility of the current application is allowed. 
+ 'params': { + 'targetPage': 'funA' // Process the information in the EntryAbility. + } + }); + }) + + Button ('Function B') + .margin('20%') + .onClick(() => { + console.info('Jump to EntryAbility funB'); + postCardAction(this, { + 'action': 'router', + 'abilityName': 'EntryAbility', // Only the UIAbility of the current application is allowed. + 'params': { + 'targetPage': 'funB' // Process the information in the EntryAbility. + } + }); + }) + } + .width('100%') + .height('100%') + } + } + ``` + +- The UIAbility receives the router event and obtains parameters. It then starts the page specified in the received message. + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + import window from '@ohos.window'; + + let selectPage = ""; + let currentWindowStage = null; + + export default class CameraAbility extends UIAbility { + // If the UIAbility is started for the first time, the onCreate lifecycle callback is triggered after the router event is received. + onCreate(want, launchParam) { + // Obtain the targetPage parameter passed in the router event. + console.info("onCreate want:" + JSON.stringify(want)); + if (want.parameters.params !== undefined) { + let params = JSON.parse(want.parameters.params); + console.info("onCreate router targetPage:" + params.targetPage); + selectPage = params.targetPage; + } + } + // If the UIAbility is running in the background, the onNewWant lifecycle callback is triggered after the router event is received. + onNewWant(want, launchParam) { + console.info("onNewWant want:" + JSON.stringify(want)); + if (want.parameters.params !== undefined) { + let params = JSON.parse(want.parameters.params); + console.info("onNewWant router targetPage:" + params.targetPage); + selectPage = params.targetPage; + } + if (currentWindowStage != null) { + this.onWindowStageCreate(currentWindowStage); + } + } + + onWindowStageCreate(windowStage: window.WindowStage) { + let targetPage; + // Start the page specified by targetPage. 
+ switch (selectPage) { + case 'funA': + targetPage = 'pages/FunA'; + break; + case 'funB': + targetPage = 'pages/FunB'; + break; + default: + targetPage = 'pages/Index'; + } + if (currentWindowStage === null) { + currentWindowStage = windowStage; + } + windowStage.loadContent(targetPage, (err, data) => { + if (err && err.code) { + console.info('Failed to load the content. Cause: %{public}s', JSON.stringify(err)); + return; + } + }); + } + }; + ``` diff --git a/en/application-dev/application-models/arkts-ui-widget-event-uiability.md b/en/application-dev/application-models/arkts-ui-widget-event-uiability.md new file mode 100644 index 0000000000000000000000000000000000000000..0d6cb33a3749c81b6b41dd4904ba64c89a7942ae --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-event-uiability.md @@ -0,0 +1,86 @@ +# Updating Widget Content Through UIAbility + + +On the widget page, the **postCardAction** API can be used to trigger a router or call event to start the UIAbility, which then updates the widget content. The following is an example of this widget update mode. + + +- On the widget page, register the **onClick** event callback of the button and call the **postCardAction** API in the callback to trigger the event to the FormExtensionAbility. + + ```ts + let storage = new LocalStorage(); + @Entry(storage) + @Component + struct WidgetCard { + @LocalStorageProp('detail') detail: string = 'init'; + + build() { + Column() { + Button ('Jump') + .margin('20%') + .onClick(() => { + console.info('postCardAction to EntryAbility'); + postCardAction(this, { + 'action': 'router', + 'abilityName': 'EntryAbility', // Only the UIAbility of the current application is allowed. 
+ 'params': { + 'detail': 'RouterFromCard' + } + }); + }) + Text(`${this.detail}`).margin('20%') + } + .width('100%') + .height('100%') + } + } + ``` + +- In the **onCreate()** or **onNewWant()** lifecycle callback of the UIAbility, use the input parameter **want** to obtain the ID (**formID**) and other information of the widget, and then call the [updateForm](../reference/apis/js-apis-app-form-formProvider.md#updateform) API to update the widget. + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + import formBindingData from '@ohos.app.form.formBindingData'; + import formProvider from '@ohos.app.form.formProvider'; + import formInfo from '@ohos.app.form.formInfo'; + + export default class EntryAbility extends UIAbility { + // If the UIAbility is started for the first time, the onCreate lifecycle callback is triggered after the router event is received. + onCreate(want, launchParam) { + console.info('Want:' + JSON.stringify(want)); + if (want.parameters[formInfo.FormParam.IDENTITY_KEY] !== undefined) { + let curFormId = want.parameters[formInfo.FormParam.IDENTITY_KEY]; + let message = JSON.parse(want.parameters.params).detail; + console.info(`UpdateForm formId: ${curFormId}, message: ${message}`); + let formData = { + "detail": message +': onCreate UIAbility.', // Matches the widget layout. + }; + let formMsg = formBindingData.createFormBindingData(formData) + formProvider.updateForm(curFormId, formMsg).then((data) => { + console.info('updateForm success.' + JSON.stringify(data)); + }).catch((error) => { + console.error('updateForm failed:' + JSON.stringify(error)); + }) + } + } + // If the UIAbility is running in the background, the onNewWant lifecycle callback is triggered after the router event is received. 
+ onNewWant(want, launchParam) { + console.info('onNewWant Want:' + JSON.stringify(want)); + if (want.parameters[formInfo.FormParam.IDENTITY_KEY] !== undefined) { + let curFormId = want.parameters[formInfo.FormParam.IDENTITY_KEY]; + let message = JSON.parse(want.parameters.params).detail; + console.info(`UpdateForm formId: ${curFormId}, message: ${message}`); + let formData = { + "detail": message +': onNewWant UIAbility.', // Matches the widget layout. + }; + let formMsg = formBindingData.createFormBindingData(formData) + formProvider.updateForm(curFormId, formMsg).then((data) => { + console.info('updateForm success.' + JSON.stringify(data)); + }).catch((error) => { + console.error('updateForm failed:' + JSON.stringify(error)); + }) + } + } + + ... + } + ``` diff --git a/en/application-dev/application-models/arkts-ui-widget-image-update.md b/en/application-dev/application-models/arkts-ui-widget-image-update.md new file mode 100644 index 0000000000000000000000000000000000000000..00c00a744afd8422274617005a50583fef5d92ee --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-image-update.md @@ -0,0 +1,166 @@ +# Updating Local and Online Images in the Widget + + +Generally, local images or online images downloaded from the network need to be displayed on a widget. To obtain local and online images, use the FormExtensionAbility. The following exemplifies how to show local and online images on a widget. + + +1. Internet access is required for downloading online images. Therefore, you need to apply for the **ohos.permission.INTERNET** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md). + +2. Update local files in the **onAddForm** lifecycle callback of the EntryFormAbility. 
+ + ```ts + import formBindingData from '@ohos.app.form.formBindingData'; + import formProvider from '@ohos.app.form.formProvider'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import request from '@ohos.request'; + import fs from '@ohos.file.fs'; + + export default class EntryFormAbility extends FormExtensionAbility { + ... + // When the widget is added, a local image is opened and transferred to the widget page for display. + onAddForm(want) { + // Assume that the local image head.PNG is in the tmp directory of the current widget. + let tempDir = this.context.getApplicationContext().tempDir; + // Open the local image and obtain the FD after the image is opened. + let file; + try { + file = fs.openSync(tempDir + '/' + 'head.PNG'); + } catch (e) { + console.error(`openSync failed: ${JSON.stringify(e)}`); + } + let formData = { + 'text': 'Image: Bear', + 'imgName': 'imgBear', + 'formImages': { + 'imgBear': file.fd + }, + 'loaded': true + } + // Encapsulate the FD in formData and return it to the widget page. + return formBindingData.createFormBindingData(formData); + } + + ... + } + ``` + +3. Update online files in the onFormEvent lifecycle callback of the EntryFormAbility. + + ```ts + import formBindingData from '@ohos.app.form.formBindingData'; + import formProvider from '@ohos.app.form.formProvider'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import request from '@ohos.request'; + import fs from '@ohos.file.fs'; + + export default class EntryFormAbility extends FormExtensionAbility { + // When the message event is triggered on the widget page, an online image is downloaded and transferred to the widget page for display. + onFormEvent(formId, message) { + let formInfo = formBindingData.createFormBindingData({ + 'text': 'Updating...' + }) + formProvider.updateForm(formId, formInfo) + // Note: The FormExtensionAbility is started when the lifecycle callback is triggered. 
It can run in the background for only 5 seconds. + // When possible, limit the size of the image to download. If an image cannot be downloaded within 5 seconds, it cannot be updated to the widget page. + let netFile = 'https://xxxx/xxxx.png'; // Specify the URL of the image to download. + let tempDir = this.context.getApplicationContext().tempDir; + let fileName = 'file' + Date.now(); + let tmpFile = tempDir + '/' + fileName; + request.downloadFile(this.context, { + url: netFile, filePath: tmpFile, enableMetered: true, enableRoaming: true + }).then((task) => { + task.on('complete', function callback() { + console.info('ArkTSCard download complete:' + tmpFile); + let file; + try { + file = fs.openSync(tmpFile); + } catch (e) { + console.error(`openSync failed: ${JSON.stringify(e)}`); + } + let fileInfo = {}; + fileInfo[fileName] = file.fd; + let formData = { + 'text': 'Image:' + fileName, + 'imgName': fileName, + 'formImages': fileInfo, + 'loaded': true + }; + let formInfo = formBindingData.createFormBindingData(formData) + formProvider.updateForm(formId, formInfo).then((data) => { + console.info('FormAbility updateForm success.' + JSON.stringify(data)); + }).catch((error) => { + console.error('FormAbility updateForm failed: ' + JSON.stringify(error)); + }) + }) + task.on('fail', function callBack(err) { + console.info('ArkTSCard download task failed. Cause:' + err); + let formInfo = formBindingData.createFormBindingData({ + 'text':'Update failed.' + }) + formProvider.updateForm(formId, formInfo) + }); + }).catch((err) => { + console.error('Failed to request the download. Cause: ' + JSON.stringify(err)); + }); + } + + ... + }; + ``` + +4. On the widget page, use the **\** component to display the widget content transferred from the EntryFormAbility. 
+ + ```ts + let storage = new LocalStorage(); + @Entry(storage) + @Component + struct WidgetCard { + @LocalStorageProp('text') text: string = 'Loading...'; + @LocalStorageProp('loaded') loaded: boolean = false; + @LocalStorageProp('imgName') imgName: string = 'name'; + + build() { + Column() { + Text(this.text) + .fontSize('12vp') + .textAlign(TextAlign.Center) + .width('100%') + .height('15%') + + Row() { + if (this.loaded) { + Image('memory://' + this.imgName) + .width('50%') + .height('50%') + .margin('5%') + } else { + Image('common/start.PNG') + .width('50%') + .height('50%') + .margin('5%') + } + }.alignItems(VerticalAlign.Center) + .justifyContent(FlexAlign.Center) + + Button ('Update') + .height('15%') + .onClick(() => { + postCardAction(this, { + 'action': 'message', + 'params': { + 'info': 'refreshImage' + } + }); + }) + } + .width('100%').height('100%') + .alignItems(HorizontalAlign.Center) + .padding('5%') + } + } + ``` + +> **NOTE** +> - The **\<Image>** component displays images in the remote memory based on the **memory://** identifier in the input parameter (**memory://fileName**). The **fileName** value must be consistent with the key in the object (**'formImages': {key: fd}**) passed by the EntryFormAbility. +> +> - The **\<Image>** component determines whether to update the image based on whether the input parameter is changed. Therefore, the value of **imgName** passed by the EntryFormAbility each time must be different. If the two values of **imgName** passed consecutively are identical, the image is not updated. 
diff --git a/en/application-dev/application-models/arkts-ui-widget-interaction-overview.md b/en/application-dev/application-models/arkts-ui-widget-interaction-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..76c4a202543c00f3df44f71b0a33d417831b5f53 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-interaction-overview.md @@ -0,0 +1,20 @@ +# Widget Data Interaction + + +The ArkTS widget framework provides the **updateForm()** and **requestForm()** APIs to proactively trigger widget updates. + + +![WidgetLocalStorageProp](figures/WidgetLocalStorageProp.png) + + +| API| System Capability| Constraints| +| -------- | -------- | -------- | +| updateForm | No| 1. Invoked by the provider.
2. Allows only the widget provider to update its own widgets. It cannot be used to update widgets by other providers.| +| requestForm | Yes| 1. Invoked by the host.
2. Allows only the widget host to update the widgets added to it. It cannot be used to update widgets added to other hosts.| + +The following describes the typical use cases of widget updates: + +- [Configuring a Widget to Update Periodically](arkts-ui-widget-update-by-time.md) +- [Updating Local and Online Images](arkts-ui-widget-image-update.md) +- [Updating Widget Content by State](arkts-ui-widget-update-by-status.md) +- [Updating Widget Content by Widget Host (for System Applications Only)](arkts-ui-widget-content-update.md) diff --git a/en/application-dev/application-models/arkts-ui-widget-lifecycle.md b/en/application-dev/application-models/arkts-ui-widget-lifecycle.md new file mode 100644 index 0000000000000000000000000000000000000000..4cb68536312e26e0f7c98546839134c0ab435a8c --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-lifecycle.md @@ -0,0 +1,95 @@ +# Widget Lifecycle Management + + +When creating an ArkTS widget, you need to implement the [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md) lifecycle APIs. + + +1. Import related modules to **EntryFormAbility.ts**. + + ```ts + import formInfo from '@ohos.app.form.formInfo'; + import formBindingData from '@ohos.app.form.formBindingData'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import formProvider from '@ohos.app.form.formProvider'; + ``` + +2. In **EntryFormAbility.ts**, implement the [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md) lifecycle APIs, including **onAddForm**, whose **want** parameter can be used to obtain the widget information through [FormParam](../reference/apis/js-apis-app-form-formInfo.md#formparam). 
+ + ```typescript + import formInfo from '@ohos.app.form.formInfo'; + import formBindingData from '@ohos.app.form.formBindingData'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import formProvider from '@ohos.app.form.formProvider'; + + export default class EntryFormAbility extends FormExtensionAbility { + onAddForm(want) { + console.info('[EntryFormAbility] onAddForm'); + // Obtain the unique widget ID formId from the want parameter. + let formId: string = want.parameters[formInfo.FormParam.IDENTITY_KEY]; + // Called when the widget is created. The widget provider should return the widget data binding class. + let obj = { + 'title': 'titleOnAddForm', + 'detail': 'detailOnAddForm' + }; + let formData = formBindingData.createFormBindingData(obj); + return formData; + } + + onCastToNormalForm(formId) { + // Called when the form provider is notified that a temporary form is successfully + // converted to a normal form. + // Called when the widget host converts the temporary widget into a normal one. The widget provider should do something to respond to the conversion. + console.info(`[EntryFormAbility] onCastToNormalForm, formId: ${formId}`); + } + + onUpdateForm(formId) { + // Override this method to support scheduled updates, periodic updates, or updates requested by the widget host. + console.info('[EntryFormAbility] onUpdateForm'); + let obj = { + 'title': 'titleOnUpdateForm', + 'detail': 'detailOnUpdateForm' + }; + let formData = formBindingData.createFormBindingData(obj); + formProvider.updateForm(formId, formData).catch((err) => { + if (err) { + // Print errors. + console.error(`[EntryFormAbility] Failed to updateForm. Code: ${err.code}, message: ${err.message}`); + return; + } + }); + } + + onChangeFormVisibility(newStatus) { + // Called when the form provider receives form events from the system. + // The callback is performed only when formVisibleNotify is set to true and the application is a system application. 
+ console.info('[EntryFormAbility] onChangeFormVisibility'); + } + + onFormEvent(formId, message) { + // Called when a specified message event defined by the form provider is triggered. + // If the widget supports event triggering, override this method and implement the trigger. + console.info('[EntryFormAbility] onFormEvent'); + } + + onRemoveForm(formId) { + // Called to notify the form provider that a specified form has been destroyed. + // Called when the corresponding widget is deleted. The input parameter is the ID of the deleted card. + console.info('[EntryFormAbility] onRemoveForm'); + } + + onConfigurationUpdate(config) { + // Called when the system configuration is updated. + console.info('[EntryFormAbility] configurationUpdate:' + JSON.stringify(config)); + } + + onAcquireFormState(want) { + // Called to return a {@link FormState} object. + // Called when the widget provider receives the status query result of a widget. By default, the initial state of the widget is returned. + return formInfo.FormState.READY; + } + } + ``` + + +> **NOTE** +> The FormExtensionAbility cannot reside in the background. Therefore, continuous tasks cannot be processed in the widget lifecycle callbacks. The FormExtensionAbility persists for 5 seconds after the lifecycle callback is completed and will exit if no new lifecycle callback is invoked during this time frame. For the service logic that may take more than 5 seconds to complete, it is recommended that you [start the application](arkts-ui-widget-event-uiability.md). After the processing is complete, use the [updateForm](../reference/apis/js-apis-app-form-formProvider.md#updateform) to notify the widget of the update. 
diff --git a/en/application-dev/application-models/arkts-ui-widget-modules.md b/en/application-dev/application-models/arkts-ui-widget-modules.md new file mode 100644 index 0000000000000000000000000000000000000000..5084b7ea5045002759ca57f10c055ef5623eb7d0 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-modules.md @@ -0,0 +1,24 @@ +# ArkTS Widget Related Modules + + + **Figure 1** ArkTS widget related modules +![WidgetModules](figures/WidgetModules.png) + + +- [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md): provides lifecycle callbacks invoked when a widget is created, destroyed, or updated. + +- [FormExtensionContext](../reference/apis/js-apis-inner-application-formExtensionContext.md): provides context for FormExtensionAbilities. You can use the APIs of this module to start FormExtensionAbilities. + +- [formProvider](../reference/apis/js-apis-app-form-formProvider.md): provides APIs related to the widget provider. You can use the APIs to update a widget, set the next update time for a widget, obtain widget information, and request a widget release. + +- [formInfo](../reference/apis/js-apis-app-form-formInfo.md): provides types and enums related to the widget information and state. + +- [formBindingData](../reference/apis/js-apis-app-form-formBindingData.md): provides APIs for widget data binding. You can use the APIs to create a **FormBindingData** object and obtain related information. + +- [Page Layout (Card.ets)](arkts-ui-widget-page-overview.md): provides APIs for a declarative paradigm UI. + - [ArkTS widget capabilities](arkts-ui-widget-event-overview.md): include the **postCardAction** API used for interaction between the widget internal and the provider application and can be called only in the widget. 
+ - [ArkTS widget capability list](arkts-ui-widget-page-overview.md#page-capabilities-supported-by-arkts-widgets): lists the APIs, components, events, attributes, and lifecycle callbacks that can be used in ArkTS widgets. + +- [Widget configuration](arkts-ui-widget-configuration.md): includes FormExtensionAbility configuration and widget configuration. + - Configure FormExtensionAbility information under **extensionAbilities** in the [module.json5 file](../quick-start/module-configuration-file.md). + - Configure the widget configuration information (**WidgetCard.ets**) in the [form_config.json](arkts-ui-widget-configuration.md) file in **resources/base/profile**. diff --git a/en/application-dev/application-models/arkts-ui-widget-page-animation.md b/en/application-dev/application-models/arkts-ui-widget-page-animation.md new file mode 100644 index 0000000000000000000000000000000000000000..9a940aeecb62682a185ba8c0529adc38017c8e2d --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-page-animation.md @@ -0,0 +1,45 @@ +# Using Animations in the Widget + + +To make your ArkTS widget more engaging, you can apply animations to it, including [explicit animation](../reference/arkui-ts/ts-explicit-animation.md), [attribute animation](../reference/arkui-ts/ts-animatorproperty.md), and [component transition](../reference/arkui-ts/ts-transition-animation-component.md). Note the following restrictions when using the animations in ArkTS widgets. + + + **Table 1** Restrictions on animation parameters + +| Name| Description| Constraints| | -------- | -------- | -------- | +| duration | Animation playback duration.| The maximum value is 1 second. If a larger value is set, the animation is still played for 1 second.| +| tempo | Animation playback speed.| Do not set this parameter in the widget. Use the default value 1.| +| delay | Animation delay duration.| Do not set this parameter in the widget. 
Use the default value 0.| +| iterations | Number of times that the animation is played.| Do not set this parameter in the widget. Use the default value 1.| + + +The following sample code implements the animation effect of button rotation: + + +![WidgetAnimation](figures/WidgetAnimation.gif) + + + +```ts +@Entry +@Component +struct AttrAnimationExample { + @State rotateAngle: number = 0; + + build() { + Column() { + Button('change rotate angle') + .onClick(() => { + this.rotateAngle = 90; + }) + .margin(50) + .rotate({ angle: this.rotateAngle }) + .animation({ + curve: Curve.EaseOut, + playMode: PlayMode.AlternateReverse + }) + }.width('100%').margin({ top: 20 }) + } +} +``` diff --git a/en/application-dev/application-models/arkts-ui-widget-page-custom-drawing.md b/en/application-dev/application-models/arkts-ui-widget-page-custom-drawing.md new file mode 100644 index 0000000000000000000000000000000000000000..49523d60af886db40b55fc90d80c9bd5027cade8 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-page-custom-drawing.md @@ -0,0 +1,79 @@ +# Applying Custom Drawing in the Widget + + + You can apply custom drawing in your ArkTS widget to create a more vibrant experience. Use the [Canvas](../reference/arkui-ts/ts-components-canvas-canvas.md) component to create a canvas on the widget, and then use the [CanvasRenderingContext2D](../reference/arkui-ts/ts-canvasrenderingcontext2d.md) object to draw custom graphics on the canvas. The following code shows how to draw a smiling face in the center of the canvas. + +```typescript +@Entry +@Component +struct Card { + private canvasWidth: number = 0; + private canvasHeight: number = 0; + // Initialize CanvasRenderingContext2D and RenderingContextSettings. 
+ private settings: RenderingContextSettings = new RenderingContextSettings(true); + private context: CanvasRenderingContext2D = new CanvasRenderingContext2D(this.settings); + + build() { + Column() { + Row() { + Canvas(this.context) + .margin('5%') + .width('90%') + .height('90%') + .onReady(() => { + console.info('[ArkTSCard] onReady for canvas draw content'); + // Obtain the actual width and height of the canvas in the onReady callback. + this.canvasWidth = this.context.width; + this.canvasHeight = this.context.height; + // Draw the background of the canvas. + this.context.fillStyle = 'rgba(203, 154, 126, 1.00)'; + this.context.fillRect(0, 0, this.canvasWidth, this.canvasHeight); + // Draw a red circle in the center of the canvas. + this.context.beginPath(); + let radius = this.context.width / 3 + let circleX = this.context.width / 2 + let circleY = this.context.height / 2 + this.context.moveTo(circleX - radius, circleY); + this.context.arc(circleX, circleY, radius, 2 * Math.PI, 0, true); + this.context.closePath(); + this.context.fillStyle = 'red'; + this.context.fill(); + // Draw the left eye of the smiling face. + let leftR = radius / 4 + let leftX = circleX - (radius / 2) + let leftY = circleY - (radius / 3.5) + this.context.beginPath(); + this.context.arc(leftX, leftY, leftR, 0, Math.PI, true); + this.context.strokeStyle = '#ffff00' + this.context.lineWidth = 10 + this.context.stroke() + // Draw the right eye of the smiling face. + let rightR = radius / 4 + let rightX = circleX + (radius / 2) + let rightY = circleY - (radius / 3.5) + this.context.beginPath(); + this.context.arc(rightX, rightY, rightR, 0, Math.PI, true); + this.context.strokeStyle = '#ffff00' + this.context.lineWidth = 10 + this.context.stroke() + // Draw the mouth of the smiling face. 
+ let mouthR = radius / 2.5 + let mouthX = circleX + let mouthY = circleY + (radius / 3) + this.context.beginPath(); + this.context.arc(mouthX, mouthY, mouthR, Math.PI, 0, true); + this.context.strokeStyle = '#ffff00' + this.context.lineWidth = 10 + this.context.stroke() + }) + } + }.height('100%').width('100%') + } +} +``` + + +The figure below shows the effect. + + +![WidgetCanvasDemo](figures/WidgetCanvasDemo.jpeg) diff --git a/en/application-dev/application-models/arkts-ui-widget-page-overview.md b/en/application-dev/application-models/arkts-ui-widget-page-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..2c709ff5c7c13c09e5a303f0adfeebe5c97690bd --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-page-overview.md @@ -0,0 +1,21 @@ +# Widget Page Capability Overview + + +You can leverage the ArkUI declarative paradigm to develop ArkTS widget pages. The following widget pages are automatically generated by a DevEco Studio template. You can adjust the pages based on the real-world service scenarios. + + +![WidgetPreviewPage](figures/WidgetPreviewPage.png) + + +ArkTS widgets have full capabilities of JS widgets, with added animation and custom drawing capabilities plus partial support for components, events, animations, data management, and state management capabilities of the [declarative paradigm](../reference/arkui-ts/ts-components-summary.md). For details, see [Page Capabilities Supported by ArkTS Widgets](#page-capabilities-supported-by-arkts-widgets). + + +## Page Capabilities Supported by ArkTS Widgets + +For details about the page capabilities supported by ArkTS widgets, see [Learning ArkTS](../quick-start/arkts-create-custom-components.md) and [ArkTS-based Declarative Development Paradigm](../reference/arkui-ts/ts-components-summary.md). + +Only the APIs marked with "supported in ArkTS widgets" can be used for ArkTS widgets. Pay special attention to the capability differences with applications. 
+ +For example, the following description indicates that the @Component decorator can be used in ArkTS widgets. + +![WidgetSupportApi](figures/WidgetSupportApi.png) diff --git a/en/application-dev/application-models/arkts-ui-widget-update-by-status.md b/en/application-dev/application-models/arkts-ui-widget-update-by-status.md new file mode 100644 index 0000000000000000000000000000000000000000..8952b8dff4ecdd3acad6b1a65513d8e529c4dc70 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-update-by-status.md @@ -0,0 +1,170 @@ +# Updating Widget Content by State + + +Multiple widgets of the same application can be configured to implement different features. For example, two weather widgets can be added to the home screen: one for displaying the weather of London, and the other Beijing. The widget is set to be updated at 07:00 every morning. It needs to detect the configured city, and then updates the city-specific weather information. The following example describes how to dynamically update the widget content based on the state. + + +- Widget configuration file: Configure the widget to be updated at 07:00 every morning. + + ```json + { + "forms": [ + { + "name": "widget", + "description": "This is a service widget.", + "src": "./ets/widget/pages/WidgetCard.ets", + "uiSyntax": "arkts", + "window": { + "designWidth": 720, + "autoDesignWidth": true + }, + "colorMode": "auto", + "isDefault": true, + "updateEnabled": true,"scheduledUpdateTime": "07:00", + "updateDuration": 0, + "defaultDimension": "2*2", + "supportDimensions": ["2*2"] + } + ] + } + ``` + +- Widget page: A widget has different states and needs to be updated by state. When the state changes, **postCardAction** is called to notify the EntryFormAbility. 
+ + ```ts + let storage = new LocalStorage(); + @Entry(storage) + @Component + struct WidgetCard { + @LocalStorageProp('textA') textA: string = 'To be updated...'; + @LocalStorageProp('textB') textB: string ='To be updated...'; + @State selectA: boolean = false; + @State selectB: boolean = false; + + build() { + Column() { + Row() { + Checkbox({ name: 'checkbox1', group: 'checkboxGroup' }) + .select(false) + .onChange((value: boolean) => { + this.selectA = value; + postCardAction(this, { + 'action': 'message', + 'params': { + 'selectA': JSON.stringify(value) + } + }); + }) + Text ('State A') + } + + Row() { + Checkbox({ name: 'checkbox2', group: 'checkboxGroup' }) + .select(false) + .onChange((value: boolean) => { + this.selectB = value; + postCardAction(this, { + 'action': 'message', + 'params': { + 'selectB': JSON.stringify(value) + } + }); + }) + Text ('State B') + } + + Row() {// Content that is updated only in state A + Text('State A: ') + Text(this.textA) + } + + Row() { // Content that is updated only in state B + Text ('State B:') + Text(this.textB) + } + }.padding('10%') + } + } + ``` + +- EntryFormAbility: The widget state data is stored in the local database. When the update event callback is triggered, the current widget state is obtained through **formId**, and then content is updated based on the state obtained. + + ```ts + import formInfo from '@ohos.app.form.formInfo' + import formProvider from '@ohos.app.form.formProvider'; + import formBindingData from '@ohos.app.form.formBindingData'; + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import dataStorage from '@ohos.data.storage' + + export default class EntryFormAbility extends FormExtensionAbility { + onAddForm(want) { + let formId = want.parameters[formInfo.FormParam.IDENTITY_KEY]; + let isTempCard: boolean = want.parameters[formInfo.FormParam.TEMPORARY_KEY]; + if (isTempCard === false) {// If the widget is a normal one, the widget information is persisted. 
+ console.info('Not temp card, init db for:' + formId); + let storeDB = dataStorage.getStorageSync(this.context.filesDir + 'myStore') + storeDB.putSync('A' + formId, 'false'); + storeDB.putSync('B' + formId, 'false'); + storeDB.flushSync(); + } + let formData = {}; + return formBindingData.createFormBindingData(formData); + } + + onRemoveForm(formId) { + console.info('onRemoveForm, formId:' + formId); + let storeDB = dataStorage.getStorageSync(this.context.filesDir + 'myStore') + storeDB.deleteSync('A' + formId); + storeDB.deleteSync('B' + formId); + } + + // If the widget is a temporary one, it is recommended that the widget information be persisted when the widget is converted to a normal one. + onCastToNormalForm(formId) { + console.info('onCastToNormalForm, formId:' + formId); + let storeDB = dataStorage.getStorageSync(this.context.filesDir + 'myStore') + storeDB.putSync('A' + formId, 'false'); + storeDB.putSync('B' + formId, 'false'); + storeDB.flushSync(); + } + + onUpdateForm(formId) { + let storeDB = dataStorage.getStorageSync(this.context.filesDir + 'myStore') + let stateA = storeDB.getSync('A' + formId, 'false').toString() + let stateB = storeDB.getSync('B' + formId, 'false').toString() + // Update textA in state A. + if (stateA === 'true') { + let formInfo = formBindingData.createFormBindingData({ + 'textA': 'AAA' + }) + formProvider.updateForm(formId, formInfo) + } + // Update textB in state B. + if (stateB === 'true') { + let formInfo = formBindingData.createFormBindingData({ + 'textB': 'BBB' + }) + formProvider.updateForm(formId, formInfo) + } + } + + onFormEvent(formId, message) { + // Store the widget state. 
+ console.info('onFormEvent formId:' + formId + 'msg:' + message); + let storeDB = dataStorage.getStorageSync(this.context.filesDir + 'myStore') + let msg = JSON.parse(message) + if (msg.selectA != undefined) { + console.info('onFormEvent selectA info:' + msg.selectA); + storeDB.putSync('A' + formId, msg.selectA); + } + if (msg.selectB != undefined) { + console.info('onFormEvent selectB info:' + msg.selectB); + storeDB.putSync('B' + formId, msg.selectB); + } + storeDB.flushSync(); + } + }; + ``` + + +> **NOTE** +> When the local database is used for widget information persistence, it is recommended that [TEMPORARY_KEY](../reference/apis/js-apis-app-form-formInfo.md#formparam) be used to determine whether the currently added widget is a normal one in the [onAddForm](../reference/apis/js-apis-app-form-formExtensionAbility.md#onaddform) lifecycle callback. If the widget is a normal one, the widget information is directly persisted. If the widget is a temporary one, the widget information is persisted when the widget is converted to a normal one ([onCastToNormalForm](../reference/apis/js-apis-app-form-formExtensionAbility.md#oncasttonormalform)). In addition, the persistent widget information needs to be deleted when the widget is destroyed ([onRemoveForm](../reference/apis/js-apis-app-form-formExtensionAbility.md#onremoveform)), preventing the database size from continuously increasing due to repeated widget addition and deletion. 
diff --git a/en/application-dev/application-models/arkts-ui-widget-update-by-time.md b/en/application-dev/application-models/arkts-ui-widget-update-by-time.md new file mode 100644 index 0000000000000000000000000000000000000000..5b27a636f83f144110c5533a3d43baf0087c3716 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-update-by-time.md @@ -0,0 +1,99 @@ +# Configuring a Widget to Update Periodically + +Before configuring a widget to update periodically, enable the periodic update feature by setting the **updateEnabled** field to **true** in the **form_config.json** file. + +The widget framework provides the following modes of updating widgets periodically: + + +- Set the update interval: The widget will be updated at the specified interval. You can specify the interval by setting the [updateDuration](arkts-ui-widget-configuration.md) field in the **form_config.json** file. For example, you can configure the widget to update once an hour. + + > **NOTE** + > + > **updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used. + + ```json + { + "forms": [ + { + "name": "widget", + "description": "This is a service widget.", + "src": "./ets/widget/pages/WidgetCard.ets", + "uiSyntax": "arkts", + "window": { + "designWidth": 720, + "autoDesignWidth": true + }, + "colorMode": "auto", + "isDefault": true, + "updateEnabled": true, // Enable the periodic update feature. + "scheduledUpdateTime": "10:30", + "updateDuration": 2, // Set the interval to update the widget. The value is a natural number, in the unit of 30 minutes. + "defaultDimension": "2*2", + "supportDimensions": ["2*2"] + } + ] + } + ``` + +- Set the scheduled update time: The widget will be updated at the scheduled time every day. You can specify the time by setting the [scheduledUpdateTime](arkts-ui-widget-configuration.md) field in the **form_config.json** file. 
For example, you can configure the widget to update at 10:30 a.m. every day. + + > **NOTE** + > + > **updateDuration** takes precedence over **scheduledUpdateTime**. For the **scheduledUpdateTime** settings to take effect, set **updateDuration** to **0**. + + + ```json + { + "forms": [ + { + "name": "widget", + "description": "This is a service widget.", + "src": "./ets/widget/pages/WidgetCard.ets", + "uiSyntax": "arkts", + "window": { + "designWidth": 720, + "autoDesignWidth": true + }, + "colorMode": "auto", + "isDefault": true, + "updateEnabled": true, // Enable the periodic update feature. + "scheduledUpdateTime": "10:30", // Set the scheduled time to update the widget. + "updateDuration": 0, + "defaultDimension": "2*2", + "supportDimensions": ["2*2"] + } + ] + } + ``` + +- Set the next update time: The widget will be updated next time at the specified time. You can specify the time by calling the [setFormNextRefreshTime()](../reference/apis/js-apis-app-form-formProvider.md#setformnextrefreshtime) API. The minimum update interval is 5 minutes. For example, you can configure the widget to update within 5 minutes after the API is called. + + ```ts + import formProvider from '@ohos.app.form.formProvider'; + + let formId = '123456789'; // Use the actual widget ID in real-world scenarios. + try { + // Configure the widget to update in 5 minutes. + formProvider.setFormNextRefreshTime(formId, 5, (err, data) => { + if (err) { + console.error(`Failed to setFormNextRefreshTime. Code: ${err.code}, message: ${err.message}`); + return; + } else { + console.info('Succeeded in setFormNextRefreshTimeing.'); + } + }); + } catch (err) { + console.error(`Failed to setFormNextRefreshTime. Code: ${err.code}, message: ${err.message}`); + } + ``` + + +When periodic update is triggered, the system calls the [onUpdateForm()](../reference/apis/js-apis-app-form-formExtensionAbility.md#onupdateform) lifecycle callback of the FormExtensionAbility. 
In the callback, the provider can call [updateForm()](../reference/apis/js-apis-app-form-formProvider.md#updateform) to update the widget. For details about how to use **onUpdateForm()**, see [Updating Widget Content Through FormExtensionAbility](arkts-ui-widget-event-formextensionability.md). + + +> **NOTE** +> 1. Each widget can be updated at the specified interval for a maximum of 50 times every day, including updates triggered by setting [updateDuration](arkts-ui-widget-configuration.md) or calling [setFormNextRefreshTime()](../reference/apis/js-apis-app-form-formProvider.md#setformnextrefreshtime). When the limit is reached, the widget cannot be updated in this mode again. The number of update times is reset at 00:00 every day. +> +> 2. The same timer is used for timing updates at the specified interval. Therefore, the first scheduled update of widgets may have a maximum deviation of 30 minutes. For example, the first widget A (updated every half an hour) is added at 03:20. The timer starts and triggers an update every half an hour. The second widget B (updated every half an hour) is added at 03:40. When the timer event is triggered at 03:50, widget A is updated, and widget B will be updated at 04:20 next time. +> +> 3. Updates at the specified interval and updates at the scheduled time are triggered only when the screen is on. When the screen is off, the update action is merely recorded. When the screen is on, the update action is performed. 
diff --git a/en/application-dev/application-models/arkts-ui-widget-working-principles.md b/en/application-dev/application-models/arkts-ui-widget-working-principles.md new file mode 100644 index 0000000000000000000000000000000000000000..a0edb6c6c68d9ada32cd3ff34f5117d5cc012ed6 --- /dev/null +++ b/en/application-dev/application-models/arkts-ui-widget-working-principles.md @@ -0,0 +1,57 @@ +# ArkTS Widget Working Principles + + +## Implementation Principles + + **Figure 1** ArkTS widget implementation principles +![WidgetPrinciple](figures/WidgetPrinciple.png) + +- Widget host: an application that displays the widget content and controls the widget location. Only the system application can function as a widget host. + +- Widget provider: an application that provides the widget content to display and controls how widget components are laid out and how they interact with users. + +- Widget Manager: a resident agent that manages widgets in the system. It provides the [formProvider](../reference/apis/js-apis-app-form-formProvider.md) and [formHost](../reference/apis/js-apis-app-form-formHost.md) APIs as well as widget management, usage, and periodic updates. + +- Widget rendering service: a service that manages widget rendering instances. Widget rendering instances are bound to the [widget components](../reference/arkui-ts/ts-basic-components-formcomponent.md) on the widget host on a one-to-one basis. The widget rendering service runs the widget page code **widgets.abc** for rendering, and sends the rendered data to the corresponding widget component on the widget host. + + **Figure 2** Working principles of the ArkTS widget rendering service +![WidgetRender](figures/WidgetRender.png) + +Unlike JS widgets, ArkTS widgets support logic code running. To avoid potential ArkTS widget issues from affecting the use of applications, the widget page code **widgets.abc** is executed by the widget rendering service, which is managed by the Widget Manager. 
Each widget component of a widget host corresponds to a rendering instance in the widget rendering service. Rendering instances of an application provider run in the same virtual machine operating environment, and rendering instances of different application providers run in different virtual machine operating environments. In this way, the resources and state data are isolated between widgets of different application providers. During development, pay attention to the use of the [globalThis](uiability-data-sync-with-ui.md#using-globalthis-between-uiability-and-page) object. Use one **globalThis** object for widgets by the same application provider, and different **globalThis** objects for widgets by different application providers. + + +## Advantages of ArkTS Widgets + +As a quick entry to applications, ArkTS widgets have the following advantages over JS widgets: + +- Improved development experience and efficiency, thanks to the unified development paradigm + ArkTS widgets share the same declarative UI development framework as application pages. This means that the page layouts can be directly reused in widgets, improving development experience and efficiency. + + **Figure 3** Comparison of widget project structures + ![WidgetProject](figures/WidgetProject.png) + +- More widget features + - Animation: The ArkTS widget supports the [attribute animation](../reference/arkui-ts/ts-animatorproperty.md) and [explicit animation](../reference/arkui-ts/ts-explicit-animation.md) capabilities, which can be leveraged to deliver a more engaging experience. + - Custom drawing: The ArkTS widget allows you to draw graphics with the [Canvas](../reference/arkui-ts/ts-components-canvas-canvas.md) component to present information more vividly. + - Logic code execution: The capability to run logic code in widgets means that service logic can be self-closed in widgets, expanding the service application scenarios of widgets. 
+ + +## Constraints on ArkTS Widgets + +Compared with JS widgets, ArkTS widgets provide more capabilities, but they are also more prone to malicious behavior. The ArkTS widget is displayed in the widget host, which is usually the home screen. To ensure user experience and power consumption, the ArkTS widget capability is restricted as follows: + +- The .so file cannot be loaded. + +- The native programming language cannot be used for development. + +- Only [partial](arkts-ui-widget-page-overview.md) components, events, animations, data management, state management, and API capabilities of the declarative paradigm are supported. + +- The event processing of the widget is independent of that of the widget host. It is recommended that you do not use the left and right sliding components when the widget host supports left and right swipes to prevent gesture conflicts. + +The following features are coming to ArkTS widgets in later versions: + +- Breakpoint debugging + +- import statements + +- Instant preview diff --git a/en/application-dev/application-models/common-event-remove-sticky.md b/en/application-dev/application-models/common-event-remove-sticky.md new file mode 100644 index 0000000000000000000000000000000000000000..358cf8ccf912e0c329684ff904207b933713835b --- /dev/null +++ b/en/application-dev/application-models/common-event-remove-sticky.md @@ -0,0 +1,36 @@ +# Removing Sticky Common Events + + +## When to Use + +Subscribers can receive sticky common events that have been sent. If the events are no longer forwarded, the event publisher needs to remove them. OpenHarmony provides an API for removing sticky common events. + +## Available APIs + +For details, see [Common Event](../reference/apis/js-apis-commonEventManager.md) + +| Name| Description| +| -------- | -------- | +| removeStickyCommonEvent(event: string, callback: AsyncCallback\): void | Removes a sticky common event.| + + +## How to Develop + +1. Import the module. 
+ + ```ts + import commonEventManager from '@ohos.commonEventManager'; + ``` + +2. The sticky common event to be removed must have been released by the application. For details about how to release sticky common events, see [Publishing Common Events](common-event-publish.md). + + ```ts + commonEventManager.removeStickyCommonEvent("sticky_event", (err) => { // sticky_event indicates the name of the sticky common event to remove. + if (err) { + console.info(`Remove sticky event AsyncCallback failed, errCode: ${err.code}, errMes: ${err.message}`); + return; + } + console.info(`Remove sticky event AsyncCallback success`); + }); + ``` diff --git a/en/application-dev/application-models/component-startup-rules.md b/en/application-dev/application-models/component-startup-rules.md index 0e6c2ce33c68913221c7b09f02e96327b0ea1c30..bddf63dbc69ea243733e6f60f67f92a854833bf7 100644 --- a/en/application-dev/application-models/component-startup-rules.md +++ b/en/application-dev/application-models/component-startup-rules.md @@ -23,22 +23,22 @@ In view of this, OpenHarmony formulates a set of component startup rules, as fol - **Before starting a component of another application, verify the visible field of the target component.** - - If the **visible** field of the target component is **false**, verify the **ohos.permission.START_INVISIBLE_ABILITY** permission. - - For details, see [Component Visible Configuration](../quick-start/module-configuration-file.md#abilities). + - If the **exported** field of the target component is **false**, verify the **ohos.permission.START_INVISIBLE_ABILITY** permission. + - For details, see [Component exported Configuration](../quick-start/module-configuration-file.md#abilities). - **Before starting a component of a background application, verify the BACKGROUND permission.** - An application is considered as a foreground application only when the application process gains focus or its UIAbility component is running in the foreground.
- Verify the **ohos.permission.START_ABILITIES_FROM_BACKGROUND** permission. -- **When the startAbilityByCall() method is used, verify the call permission.** For details, see [Using Ability Call to Implement UIAbility Interaction](uiability-intra-device-interaction.md#using-ability-call-to-implement-uiability-interaction) and [Using Cross-Device Ability Call](hop-multi-device-collaboration.md#using-cross-device-ability-call). +- **When the startAbilityByCall() method is used, verify the call permission.** For details, see [Using Call to Implement UIAbility Interaction](uiability-intra-device-interaction.md#using-call-to-implement-uiability-interaction) and [Using Cross-Device Call](hop-multi-device-collaboration.md#using-cross-device-call). - Verify the **ohos.permission.ABILITY_BACKGROUND_COMMUNICATION** permission. > **NOTE** > > - Component startup control has been implemented since OpenHarmony v3.2 Release. -> -> - The new component startup rules are more strict than the original ones. You must be familiar with the new startup rules to prevent service exceptions. +> +> - The new component startup rules are more strict than the original ones. You must be familiar with the new startup rules to prevent service exceptions. ## Intra-Device Component Startup Rules diff --git a/en/application-dev/application-models/context-switch.md b/en/application-dev/application-models/context-switch.md index 2f52158f5d36be8c59f747376195e9e43078d1f9..e1d155c8a60f6ca3e225174aece28738663b8079 100644 --- a/en/application-dev/application-models/context-switch.md +++ b/en/application-dev/application-models/context-switch.md @@ -5,7 +5,7 @@ | -------- | -------- | -------- | | [getOrCreateLocalDir(callback:AsyncCallback<string>):void;](../reference/apis/js-apis-inner-app-context.md#contextgetorcreatelocaldir7)
[getOrCreateLocalDir():Promise<string>;](../reference/apis/js-apis-inner-app-context.md#contextgetorcreatelocaldir7-1) | There is no corresponding API in the stage model.| Applications developed on the stage model do not have the operation permission in the application root directory. Therefore, no corresponding API is provided.| | [verifyPermission(permission:string,options:PermissionOptions,callback:AsyncCallback<number>):void;](../reference/apis/js-apis-inner-app-context.md#contextverifypermission7)
[verifyPermission(permission:string,callback:AsyncCallback<number>):void;](../reference/apis/js-apis-inner-app-context.md#contextverifypermission7-1)
[verifyPermission(permission:string,options?:PermissionOptions):Promise<number>;](../reference/apis/js-apis-inner-app-context.md#contextverifypermission7-2) | \@ohos.abilityAccessCtrl.d.ts | [verifyAccessTokenSync(tokenID: number, permissionName: Permissions): GrantStatus;](../reference/apis/js-apis-abilityAccessCtrl.md#verifyaccesstokensync9)
[verifyAccessToken(tokenID: number, permissionName: Permissions): Promise<GrantStatus>;](../reference/apis/js-apis-abilityAccessCtrl.md#verifyaccesstoken9) | -| [requestPermissionsFromUser(permissions:Array<string>,requestCode:number,resultCallback:AsyncCallback<PermissionRequestResult>):void;](../reference/apis/js-apis-inner-app-context.md#contextrequestpermissionsfromuser7)
[requestPermissionsFromUser(permissions:Array<string>,requestCode:number):Promise<PermissionRequestResult>;](../reference/apis/js-apis-inner-app-context.md#contextrequestpermissionsfromuser7-1) | application\UIAbilityContext.d.ts | [requestPermissionsFromUser(permissions: Array<string>, requestCallback: AsyncCallback<PermissionRequestResult>) : void;](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextrequestpermissionsfromuser)
[requestPermissionsFromUser(permissions: Array<string>) : Promise<PermissionRequestResult>;](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextrequestpermissionsfromuser-1) | +| [requestPermissionsFromUser(permissions:Array<string>,requestCode:number,resultCallback:AsyncCallback<PermissionRequestResult>):void;](../reference/apis/js-apis-inner-app-context.md#contextrequestpermissionsfromuser7)
[requestPermissionsFromUser(permissions:Array<string>,requestCode:number):Promise<PermissionRequestResult>;](../reference/apis/js-apis-inner-app-context.md#contextrequestpermissionsfromuser7-1) | \@ohos.abilityAccessCtrl.d.ts | [requestPermissionsFromUser(context: Context, permissionList: Array<Permissions>, requestCallback: AsyncCallback<PermissionRequestResult>) : void;](../reference/apis/js-apis-abilityAccessCtrl.md#requestpermissionsfromuser9)
[requestPermissionsFromUser(context: Context, permissionList: Array<Permissions>) : Promise<PermissionRequestResult>;](../reference/apis/js-apis-abilityAccessCtrl.md#requestpermissionsfromuser9-1) | | [getApplicationInfo(callback:AsyncCallback<ApplicationInfo>):void;](../reference/apis/js-apis-inner-app-context.md#contextgetapplicationinfo7)
[getApplicationInfo():Promise<ApplicationInfo>;](../reference/apis/js-apis-inner-app-context.md#contextgetapplicationinfo7-1) | application\Context.d.ts | [applicationInfo: ApplicationInfo;](../reference/apis/js-apis-inner-application-context.md#attributes)| | [getBundleName(callback : AsyncCallback<string>): void;](../reference/apis/js-apis-inner-app-context.md#contextgetbundlename7)
[getBundleName(): Promise<string>;](../reference/apis/js-apis-inner-app-context.md#contextgetbundlename7-1) | application\UIAbilityContext.d.ts | [abilityInfo.bundleName: string;](../reference/apis/js-apis-inner-application-uiAbilityContext.md#attributes)| | [getDisplayOrientation(callback : AsyncCallback<bundle.DisplayOrientation>): void;](../reference/apis/js-apis-inner-app-context.md#contextgetdisplayorientation7)
[getDisplayOrientation(): Promise<bundle.DisplayOrientation>;](../reference/apis/js-apis-inner-app-context.md#contextgetdisplayorientation7-1) | \@ohos.screen.d.ts | [readonly orientation: Orientation;](../reference/apis/js-apis-screen.md#orientation) | diff --git a/en/application-dev/application-models/create-pageability.md b/en/application-dev/application-models/create-pageability.md index 783646ff4cfd5fa2ab193005bfa9d182dc75b70c..d0f308ebc08e035d5568ee0e127a9739e400d124 100644 --- a/en/application-dev/application-models/create-pageability.md +++ b/en/application-dev/application-models/create-pageability.md @@ -76,22 +76,22 @@ In the FA model, you can call **getContext** of **featureAbility** to obtain the The following code snippet shows how to use **getContext()** to obtain the application context and distributed directory: ```ts -import featureAbility from '@ohos.ability.featureAbility' -import fileIo from '@ohos.fileio' +import featureAbility from '@ohos.ability.featureAbility'; +import fs from '@ohos.file.fs'; (async () => { - let dir: string + let dir: string; try { - console.info('Begin to getOrCreateDistributedDir') - dir = await featureAbility.getContext().getOrCreateDistributedDir() + console.info('Begin to getOrCreateDistributedDir'); + dir = await featureAbility.getContext().getOrCreateDistributedDir(); console.info('distribute dir is ' + dir) } catch (error) { - console.error('getOrCreateDistributedDir failed with ' + error) + console.error('getOrCreateDistributedDir failed with ' + error); } let fd: number; let path = dir + "/a.txt"; - fd = fileIo.openSync(path, 0o2 | 0o100, 0o666); - fileIo.close(fd); + fd = fs.openSync(path, fs.OpenMode.READ_WRITE).fd; + fs.close(fd); })() ``` diff --git a/en/application-dev/application-models/data-share-via-want.md b/en/application-dev/application-models/data-share-via-want.md index a057eb5c2b4796201cdd8bf35344ab600cfe0be0..d5512e0c446b94dcf384504f11ff25d458cfeafc 100644 --- 
a/en/application-dev/application-models/data-share-via-want.md +++ b/en/application-dev/application-models/data-share-via-want.md @@ -2,20 +2,20 @@ Users often need to share data (such as a text or an image) from one application to another. The following uses PDF file sharing as an example to describe how to use Want to share data between applications. -Data sharing requires two UIAbility components (one for the sharing party and the other for the shared party) and one system component (used as the application selector). When the sharing party initiates data sharing by calling **startAbility()**, the system implicitly matches and displays all applications that support the type of data to share. After the user selects an application, the system starts the application to complete data sharing. +Data sharing requires two UIAbility components (one for the sharing party and the other for the shared party) and one system component (used as the application sharing box). When the sharing party initiates data sharing by calling **startAbility()**, the system implicitly matches and displays all applications that support the type of data to share. After the user selects an application, the system starts the application to complete data sharing. In this section, data sharing is triggered by touching a button. You can use other ways to trigger data sharing during application development. This section focuses on how to configure Want to implement data sharing. The following actions are involved for data sharing: -- **ohos.want.action.select**: action of starting the application selector. +- **ohos.want.action.select**: action of starting the application sharing box. - **ohos.want.action.sendData**: action of sending a single data record, that is, transferring data to the shared party. ## Sharing Party -The sharing party starts an application selector and transfers the data to the shared party. Therefore, Want of the sharing party must be nested at two layers. 
In the first layer, implicit Want is used together with the **ohos.want.action.select** action to display the application selector. In the second layer, the data to share is declared +The sharing party starts an application sharing box and transfers the data to the shared party. Therefore, Want of the sharing party must be nested at two layers. In the first layer, implicit Want is used together with the **ohos.want.action.select** action to display the application sharing box. In the second layer, the data to share is declared -in the custom field **parameters**, and then the Want that includes the **ohos.want.action.sendData** action and the **parameters** field is transferred to the application selector. The shared party obtains the shared data from **parameters**. +in the custom field **parameters**, and then the Want that includes the **ohos.want.action.sendData** action and the **parameters** field is transferred to the application sharing box. The shared party obtains the shared data from **parameters**. ```ts import common from '@ohos.app.ability.common'; @@ -28,21 +28,21 @@ let fileSize; // Obtain the size of the file to share. function implicitStartAbility() { let context = getContext(this) as common.UIAbilityContext; let wantInfo = { - / This action is used to implicitly match the application selector. + / This action is used to implicitly match the application sharing box. action: 'ohos.want.action.select', // This is the custom parameter in the first layer of Want, - / which is intended to add information to the application selector. + / which is intended to add information to the application sharing box. parameters: { // MIME type of PDF. - "ability.picker.type": fileType, - "ability.picker.fileNames": [fileName], - "ability.picker.fileSizes": [fileSize], + 'ability.picker.type': fileType, + 'ability.picker.fileNames': [fileName], + 'ability.picker.fileSizes': [fileSize], // This is nested Want ,which will be directly sent to the selected application. 
- "ability.want.params.INTENT": { - "action": "ohos.want.action.sendData", - "type": "application/pdf", - "parameters": { - "keyFd": { "type": "FD", "value": fileFd } + 'ability.want.params.INTENT': { + 'action': 'ohos.want.action.sendData', + 'type': 'application/pdf', + 'parameters': { + 'keyFd': { 'type': 'FD', 'value': fileFd } } } } @@ -59,14 +59,15 @@ function implicitStartAbility() { > > Data sharing can be implemented only in FD format. For details about how to obtain the FD and file name, see [File Management](../reference/apis/js-apis-file-fs.md). -In the preceding code, under the custom field **parameters**, the following **ability.picker.*** fields are used to pass the information to be displayed on the application selector: +In the preceding code, under the custom field **parameters**, the following **ability.picker.*** fields are used to pass the information to be displayed on the application sharing box: - **ability.picker.type**: file type icon. - **ability.picker.fileNames**: file name. - **ability.picker.fileSizes**: file size, in bytes. - **ability.picker.fileNames** and **ability.picker.fileSizes** are arrays and have a one-to-one mapping. -The following figure shows an example. +The following figure shows an example. + ![](figures/ability-startup-with-implicit-want2.png) ## Shared Party diff --git a/en/application-dev/application-models/datashareextensionability.md b/en/application-dev/application-models/datashareextensionability.md index f671848f890277af92fc23869c5db0d57b02a316..bea3de69c6d7ad375206fb1d53bcc36c2624989d 100644 --- a/en/application-dev/application-models/datashareextensionability.md +++ b/en/application-dev/application-models/datashareextensionability.md @@ -1,4 +1,4 @@ # DataShareExtensionAbility (for System Applications Only) -DataShareExtensionAbility provides the data sharing capability. System applications can implement a DataShareExtensionAbility or access an existing DataShareExtensionAbility in the system. 
Third-party applications can only access an existing DataShareExtensionAbility. For details, see [DataShare Development](../database/database-datashare-guidelines.md). +DataShareExtensionAbility provides the data sharing capability. System applications can implement a DataShareExtensionAbility or access an existing DataShareExtensionAbility in the system. Third-party applications can only access an existing DataShareExtensionAbility. For details, see [Cross-Application Data Sharing Overview](../database/share-device-data-across-apps-overview.md). diff --git a/en/application-dev/application-models/enterprise-extensionAbility.md b/en/application-dev/application-models/enterprise-extensionAbility.md index 3750b2298ba5321194d827edde4ad7c526b467f7..0038b41e5b4f654d8c7924ec1232bb342dd616cb 100644 --- a/en/application-dev/application-models/enterprise-extensionAbility.md +++ b/en/application-dev/application-models/enterprise-extensionAbility.md @@ -6,10 +6,7 @@ EnterpriseAdminExtensionAbility is a mandatory component for Mobile Device Manag ## Constraints -- **Function constraints** - - EnterpriseAdminExtensionAbility is applicable only to enterprise administrator applications. - +EnterpriseAdminExtensionAbility is applicable only to enterprise administrator applications. ## Observing Activation/Deactivation of a Device Administrator Application and Installation/Removal of an Application @@ -25,11 +22,11 @@ EnterpriseAdminExtensionAbility is a mandatory component for Mobile Device Manag ### Available APIs -| Class | API | Description | -| :------------------------------ | ----------------------------------------- | ---------------------------- | +| Class | API | Description | +| ------------------------------ | ----------------------------------------- | ---------------------------- | +| EnterpriseAdminExtensionAbility | onAdminEnabled(): void | Called when a device administrator application is activated. 
| | EnterpriseAdminExtensionAbility | onAdminDisabled(): void | Called when a device administrator application is deactivated.| | EnterpriseAdminExtensionAbility | onBundleAdded(bundleName: string): void | Called when an application is installed on a device. | -| EnterpriseAdminExtensionAbility | onAdminEnabled(): void | Called when a device administrator application is activated. | | EnterpriseAdminExtensionAbility | onBundleRemoved(bundleName: string): void | Called when an application is removed from a device. | ### How to Develop @@ -78,10 +75,9 @@ To implement EnterpriseAdminExtensionAbility, you need to activate the device ad ## Example -Use **subscribeManagedEvent()** and **unsubscribeManagedEvent()** in the **@ohos.enterprise.adminManager** module to subscribe to application installation and removal events. When an application is installed or removed, the MDM application is notified of the event. Then, the MDM application reports the event in the callback to notify the enterprise administrator. +Use **subscribeManagedEvent** in the **@ohos.enterprise.adminManager** module to subscribe to application installation and removal events. When an application is installed or removed, the MDM application is notified of the event. Then, the MDM application reports the event in the callback to notify the enterprise administrator. To unsubscribe from events, use **unsubscribeManagedEvent**. 
```ts - @State managedEvents: Array = [0,1] @State subscribeManagedEventMsg: string = "" @State unsubscribeManagedEventMsg: string = "" diff --git a/en/application-dev/application-models/explicit-implicit-want-mappings.md b/en/application-dev/application-models/explicit-implicit-want-mappings.md index 16854efb9236dc6bdc9fbe990c9cbe3581495633..9e748a31795e3afc713e7091067a8164e8a623cc 100644 --- a/en/application-dev/application-models/explicit-implicit-want-mappings.md +++ b/en/application-dev/application-models/explicit-implicit-want-mappings.md @@ -62,7 +62,7 @@ The system matches the **action** attribute in the **want** parameter passed by **Figure 1** Matching rules of action in the want parameter - ![want-action](figures/want-action.png) + ![want-action](figures/want-action.png) ### Matching Rules of entities in the want Parameter @@ -79,19 +79,15 @@ The system matches the **entities** attribute in the **want** parameter passed b - If **entities** in the passed **want** parameter is specified, and **entities** under **skills** of an ability is specified but does not contain **entities** in the passed **want** parameter, the matching fails. - Figure 2 Matching rule of entities in the want parameter + **Figure 2** Matching rule of entities in the want parameter - ![want-entities](figures/want-entities.png) + ![want-entities](figures/want-entities.png) ### Matching Rules of uri and type in the want Parameter When the **uri** and **type** parameters are specified in the **want** parameter to initiate a component startup request, the system traverses the list of installed components and matches the **uris** array under **skills** of the abilities one by one. If one of the **uris** arrays under **skills** matches the **uri** and **type** in the passed **want**, the matching is successful. 
-Figure 3 Matching rules when uri and type are specified in the want parameter - -![want-uri-type1](figures/want-uri-type1.png) - There are four combinations of **uri** and **type** settings. The matching rules are as follows: - Neither **uri** or **type** is specified in the **want** parameter. @@ -111,11 +107,17 @@ There are four combinations of **uri** and **type** settings. The matching rules - If the **uris** array under **skills** of an ability is unspecified, the matching fails. - If the **uris** array under **skills** of an ability contains an element whose [uri is matched](#matching-rules-of-uri) and [type is matched](#matching-rules-of-type), the matching is successful. Otherwise, the matching fails. +Leftmost URI matching: When only **scheme**, a combination of **scheme** and **host**, or a combination of **scheme**, **host**, and **port** is configured in the **uris** array under **skills** of the ability, +the matching is successful only if the leftmost URI in the passed **want** parameter matches **scheme**, the combination of **scheme** and **host**, or the combination of **scheme**, **host**, and **port**. -To simplify the description, **uri** and **type** passed in the **want** parameter are called **w_uri** and **w_type**, respectively; the **uris** array under **skills** of an ability to match is called **s_uris**; each element in the array is called **s_uri**. Matching is performed from top to bottom. +**Figure 3** Matching rules when uri and type are specified in the want parameter + ![want-uri-type1](figures/want-uri-type1.png) + + +To simplify the description, **uri** and **type** passed in the **want** parameter are called **w_uri** and **w_type**, respectively; the **uris** array under **skills** of an ability to match is called **s_uris**; each element in the array is called **s_uri**. Matching is performed from top to bottom. 
-Figure 4 Matching rules of uri and type in the want parameter +**Figure 4** Matching rules of uri and type in the want parameter ![want-uri-type2](figures/want-uri-type2.png) @@ -128,7 +130,9 @@ To simplify the description, **uri** in the passed **want** parameter is called - If **host** of **s_uri** is unspecified and **scheme** of **w_uri** and **scheme** of **s_uri** are the same, the matching is successful. Otherwise, the matching fails. -- If **path**, **pathStartWith**, and **pathRegex** of **s_uri** are unspecified and **w_uri** and **s_uri** are the same, the matching is successful. Otherwise, the matching fails. +- If **port** of **s_uri** is unspecified and the combination of **scheme** and **host** of **w_uri** is the same as the combination of **scheme** and **host** of **s_uri**, the matching is successful. Otherwise, the matching fails. + +- If **path**, **pathStartWith**, and **pathRegex** of **s_uri** are unspecified and the combination of **scheme**, **host**, and **port** of **w_uri** is the same as the combination of **scheme**, **host**, and **port** of **s_uri**, the matching is successful. Otherwise, the matching fails. - If **path** of **s_uri** is specified and the **full path expressions** of **w_uri** and **s_uri** are the same, the matching is successful. Otherwise, the matching of **pathStartWith** continues. @@ -139,12 +143,17 @@ To simplify the description, **uri** in the passed **want** parameter is called > **NOTE** > > The **scheme**, **host**, **port**, **path**, **pathStartWith**, and **pathRegex** attributes of **uris** under **skills** of an ability are concatenated. 
If **path**, **pathStartWith**, and **pathRegex** are declared in sequence, **uris** can be concatenated into the following expressions: -> +> > - **Full path expression**: `scheme://host:port/path` -> +> > - **Prefix expression**: `scheme://host:port/pathStartWith` -> +> > - **Regular expression**: `scheme://host:port/pathRegex` +> +> - **Prefix URI expression**: When only **scheme**, a combination of **scheme** and **host**, or a combination of **scheme**, **host**, and **port** is configured in the configuration file, the matching is successful if a URI prefixed with the configuration file is passed in. +> * `scheme://` +> * `scheme://host` +> * `scheme://host:port` ### Matching Rules of type diff --git a/en/application-dev/application-models/extensionability-overview.md b/en/application-dev/application-models/extensionability-overview.md index 809e4e8f70ed31ad361e18dd8cb7e079ddf93086..d176b2d5322b215ab3d730b59cfc5a8e1f6dfb99 100644 --- a/en/application-dev/application-models/extensionability-overview.md +++ b/en/application-dev/application-models/extensionability-overview.md @@ -25,6 +25,13 @@ An [ExtensionAbilityType](../reference/apis/js-apis-bundleManager.md#extensionab - [EnterpriseAdminExtensionAbility](../reference/apis/js-apis-EnterpriseAdminExtensionAbility.md): ExtensionAbility component of the enterprise_admin type, which provides APIs for processing enterprise management events, such as application installation events on devices and events indicating too many incorrect screen-lock password attempts. +> **NOTE** +> +> 1. Third-party applications cannot implement ServiceExtensionAbility, DataShareExtensionAbility, StaticSubscriberExtensionAbility, or WindowExtensionAbility. +> +> 2. To implement transaction processing in the background for a third-party application, use background tasks rather than ServiceExtensionAbility. For details, see [Background Task](../task-management/background-task-overview.md). +> +> 3. 
Third-party applications can use other types of ExtensionAbility components that have been defined. ## Using ExtensionAbility of the Specified Type @@ -39,7 +46,7 @@ The following uses [InputMethodExtensionAbility](../reference/apis/js-apis-input ## Implementing ExtensionAbility of the Specified Type -The following uses [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md) as an example. The widget framework provides the base class [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md). You derive this base class to create your own class (such as **MyFormExtensionAbility**), implement the callbacks, such as **onCreate()** and **onUpdateForm()**, to provide specific widget functionalities. For details, see [FormExtensionAbility](Widget-development-stage.md). +The following uses [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md) as an example. The widget framework provides the base class [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md). You derive this base class to create your own class (such as **MyFormExtensionAbility**), implement the callbacks, such as **onCreate()** and **onUpdateForm()**, to provide specific widget functionalities. For details, see [FormExtensionAbility](service-widget-overview.md). You do not need to care when to add or delete a widget. The lifecycle of the FormExtensionAbility instance and the lifecycle of the ExtensionAbility process where the FormExtensionAbility instance is located are scheduled and managed by FormManagerService. @@ -57,3 +64,5 @@ You do not need to care when to add or delete a widget. The lifecycle of the For > - The two FormExtensionAbility components run in an independent process. > > - The two ImeExtensionAbility components run in an independent process. 
+ + \ No newline at end of file diff --git a/en/application-dev/application-models/figures/JSCardPrinciple.png b/en/application-dev/application-models/figures/JSCardPrinciple.png new file mode 100644 index 0000000000000000000000000000000000000000..558c2c2a679737eed2bf3b129f632e6300d0d2da Binary files /dev/null and b/en/application-dev/application-models/figures/JSCardPrinciple.png differ diff --git a/en/application-dev/application-models/figures/WidgerCameraCard.png b/en/application-dev/application-models/figures/WidgerCameraCard.png new file mode 100644 index 0000000000000000000000000000000000000000..55e62cdb8791ca6bbc95b7ea4c054e93270ce7a6 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgerCameraCard.png differ diff --git a/en/application-dev/application-models/figures/WidgetAnimation.gif b/en/application-dev/application-models/figures/WidgetAnimation.gif new file mode 100644 index 0000000000000000000000000000000000000000..eaddfb4cf4d0bf9613b3a108c26ea1617d68d3c1 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetAnimation.gif differ diff --git a/en/application-dev/application-models/figures/WidgetArchitecture.png b/en/application-dev/application-models/figures/WidgetArchitecture.png new file mode 100644 index 0000000000000000000000000000000000000000..b97ddda74fb89f32ed248c1d4c6d3e8e6e100a05 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetArchitecture.png differ diff --git a/en/application-dev/application-models/figures/WidgetCanvasDemo.jpeg b/en/application-dev/application-models/figures/WidgetCanvasDemo.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..9c797ff4575ae0aaf9aad27ae5d4d701181faf97 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetCanvasDemo.jpeg differ diff --git a/en/application-dev/application-models/figures/WidgetCardPage.png b/en/application-dev/application-models/figures/WidgetCardPage.png new file 
mode 100644 index 0000000000000000000000000000000000000000..795e96171e6d890e72a09382906302dd0fa45fab Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetCardPage.png differ diff --git a/en/application-dev/application-models/figures/WidgetLocalStorageProp.png b/en/application-dev/application-models/figures/WidgetLocalStorageProp.png new file mode 100644 index 0000000000000000000000000000000000000000..1a45723865ff9f990c3a4197338e9cbc9eb3b6f4 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetLocalStorageProp.png differ diff --git a/en/application-dev/application-models/figures/WidgetModules.png b/en/application-dev/application-models/figures/WidgetModules.png new file mode 100644 index 0000000000000000000000000000000000000000..6eaac0b6ca404eb9575587add72935e9ce580030 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetModules.png differ diff --git a/en/application-dev/application-models/figures/WidgetPostCardAction.png b/en/application-dev/application-models/figures/WidgetPostCardAction.png new file mode 100644 index 0000000000000000000000000000000000000000..42a07e13036d6252309ca4e4bc857043486269c8 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetPostCardAction.png differ diff --git a/en/application-dev/application-models/figures/WidgetPreviewPage.png b/en/application-dev/application-models/figures/WidgetPreviewPage.png new file mode 100644 index 0000000000000000000000000000000000000000..5f614e3db780f8d83f3c2f0865a9521aacd2b0de Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetPreviewPage.png differ diff --git a/en/application-dev/application-models/figures/WidgetPrinciple.png b/en/application-dev/application-models/figures/WidgetPrinciple.png new file mode 100644 index 0000000000000000000000000000000000000000..588975d0095de58d0d220809ba77aec541a64984 Binary files /dev/null and 
b/en/application-dev/application-models/figures/WidgetPrinciple.png differ diff --git a/en/application-dev/application-models/figures/WidgetProject.png b/en/application-dev/application-models/figures/WidgetProject.png new file mode 100644 index 0000000000000000000000000000000000000000..788bb3ac63ca5727527bd104f76689f762b7b33d Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetProject.png differ diff --git a/en/application-dev/application-models/figures/WidgetProjectCreate1.png b/en/application-dev/application-models/figures/WidgetProjectCreate1.png new file mode 100644 index 0000000000000000000000000000000000000000..5369f48edcee476ae8317b9f0e1fb98b06607e93 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetProjectCreate1.png differ diff --git a/en/application-dev/application-models/figures/WidgetProjectCreate2.png b/en/application-dev/application-models/figures/WidgetProjectCreate2.png new file mode 100644 index 0000000000000000000000000000000000000000..7bf742e04e2f30febb05f2d8638193dc10532863 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetProjectCreate2.png differ diff --git a/en/application-dev/application-models/figures/WidgetProjectCreate3.png b/en/application-dev/application-models/figures/WidgetProjectCreate3.png new file mode 100644 index 0000000000000000000000000000000000000000..98429567ad24b1a83c67118173bf6cb504bea25d Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetProjectCreate3.png differ diff --git a/en/application-dev/application-models/figures/WidgetProjectView.png b/en/application-dev/application-models/figures/WidgetProjectView.png new file mode 100644 index 0000000000000000000000000000000000000000..9d1c06e47502131983b0b7cd56e66269b5be6d88 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetProjectView.png differ diff --git a/en/application-dev/application-models/figures/WidgetRender.png 
b/en/application-dev/application-models/figures/WidgetRender.png new file mode 100644 index 0000000000000000000000000000000000000000..228128b143995fec75c71c4172e3d90ca15177f6 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetRender.png differ diff --git a/en/application-dev/application-models/figures/WidgetSupportApi.png b/en/application-dev/application-models/figures/WidgetSupportApi.png new file mode 100644 index 0000000000000000000000000000000000000000..1ac3d68c19683c69a16f5ebc305f3b79cb8c6566 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetSupportApi.png differ diff --git a/en/application-dev/application-models/figures/WidgetUpdatePage.png b/en/application-dev/application-models/figures/WidgetUpdatePage.png new file mode 100644 index 0000000000000000000000000000000000000000..075c8e97c85386c062a651f3b4f876e6c049171f Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetUpdatePage.png differ diff --git a/en/application-dev/application-models/figures/WidgetUse.png b/en/application-dev/application-models/figures/WidgetUse.png new file mode 100644 index 0000000000000000000000000000000000000000..9bcc46d9044b04633692171f6a553bf43ce147c3 Binary files /dev/null and b/en/application-dev/application-models/figures/WidgetUse.png differ diff --git a/en/application-dev/application-models/figures/mission-and-multiton.png b/en/application-dev/application-models/figures/mission-and-multiton.png new file mode 100644 index 0000000000000000000000000000000000000000..e50f9d44d475711c17bfe56394fddd8a6c7b784c Binary files /dev/null and b/en/application-dev/application-models/figures/mission-and-multiton.png differ diff --git a/en/application-dev/application-models/figures/uiability-launch-type1.gif b/en/application-dev/application-models/figures/uiability-launch-type1.gif new file mode 100644 index 0000000000000000000000000000000000000000..d31a51a1aa97d32ee0ee7df4803378c1b7124119 Binary files 
/dev/null and b/en/application-dev/application-models/figures/uiability-launch-type1.gif differ diff --git a/en/application-dev/application-models/figures/uiability-launch-type1.png b/en/application-dev/application-models/figures/uiability-launch-type1.png deleted file mode 100644 index c4f5aa4b9a988d8e7148b504c4dcc163961cb103..0000000000000000000000000000000000000000 Binary files a/en/application-dev/application-models/figures/uiability-launch-type1.png and /dev/null differ diff --git a/en/application-dev/application-models/figures/uiability-launch-type2.gif b/en/application-dev/application-models/figures/uiability-launch-type2.gif new file mode 100644 index 0000000000000000000000000000000000000000..ecb3c413e3af2f92ef6834024d0d413e30c2419f Binary files /dev/null and b/en/application-dev/application-models/figures/uiability-launch-type2.gif differ diff --git a/en/application-dev/application-models/figures/uiability-launch-type2.png b/en/application-dev/application-models/figures/uiability-launch-type2.png deleted file mode 100644 index 6f0e43d24f745aee41601cc48f4bc138572fbeb5..0000000000000000000000000000000000000000 Binary files a/en/application-dev/application-models/figures/uiability-launch-type2.png and /dev/null differ diff --git a/en/application-dev/application-models/figures/uiability-launch-type3.gif b/en/application-dev/application-models/figures/uiability-launch-type3.gif new file mode 100644 index 0000000000000000000000000000000000000000..029e8ba7e90eb836f8466c604d4fcf8171ffec6e Binary files /dev/null and b/en/application-dev/application-models/figures/uiability-launch-type3.gif differ diff --git a/en/application-dev/application-models/figures/want-uri-type1.png b/en/application-dev/application-models/figures/want-uri-type1.png index e0fe40d1a3cd40b72379bd947aaf2e3977021b32..ed53694a9608e8529c5e4633fca42b041bc7ab76 100644 Binary files a/en/application-dev/application-models/figures/want-uri-type1.png and 
b/en/application-dev/application-models/figures/want-uri-type1.png differ diff --git a/en/application-dev/application-models/hop-cross-device-migration.md b/en/application-dev/application-models/hop-cross-device-migration.md index d90a10995f0aeba773179fc7807ab25711b4594c..c51e82e15f4e14f4b42b25e656543a01d84406fb 100644 --- a/en/application-dev/application-models/hop-cross-device-migration.md +++ b/en/application-dev/application-models/hop-cross-device-migration.md @@ -1,4 +1,4 @@ -# Cross-Device Migration (for System Applications Only)] +# Cross-Device Migration (for System Applications Only) ## When to Use @@ -47,25 +47,16 @@ The table below describes the main APIs used for cross-device migration. For det ## How to Develop -1. Configure the data synchronization permission in the **module.json5** file. The sample code is as follows: - - ```json - { - "module": { - "requestPermissions":[ - { - "name" : "ohos.permission.DISTRIBUTED_DATASYNC", - } - ] - } - } - ``` +1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). -2. Configure the fields related to cross-device migration in the configuration file. - - Configure the application to support migration. - +2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). - Set the **continuable** field in the **module.json5** file to **true**. The default value is **false**. If this parameter is set to **false**, the application cannot be continued on the target device. +3. Configure the fields related to cross-device migration in the configuration file. + + Configure the application to support migration. + Set the **continuable** field in the **module.json5** file to **true**. 
The default value is **false**. If this parameter is set to **false**, the application cannot be continued on the target device. + + ```json { "module": { @@ -80,47 +71,31 @@ The table below describes the main APIs used for cross-device migration. For det } ``` - - Configure the application launch type. For details, see [UIAbility Component Launch Type](uiability-launch-type.md). - -3. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows: - - ```ts - requestPermission() { - let context = this.context - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'] - context.requestPermissionsFromUser(permissions).then((data) => { - console.info("Succeed to request permission from user with data: "+ JSON.stringify(data)) - }).catch((error) => { - console.info("Failed to request permission from user with error: "+ JSON.stringify(error)) - }) - } - ``` + Configure the application launch type. For details, see [UIAbility Component Launch Type](uiability-launch-type.md). 4. Implement [onContinue()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncontinue) in the UIAbility of the initiator. - [onContinue()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncontinue) is called on the initiator. You can save the data in this method to implement application compatibility check and migration decision. - - Saving migrated data: You can save the data to be migrated in key-value pairs in **wantParam**. - Checking application compatibility: You can obtain the version number of the target application from **wantParam** and check the compatibility between the target application and the current application. - Making a migration decision: You can determine whether to support the migration based on the return value of **onContinue()**. For details about the return value, see [Available APIs](#available-apis). 
- The sample code is as follows: - - ```ts - import UIAbility from '@ohos.app.ability.UIAbility'; - import AbilityConstant from '@ohos.app.ability.AbilityConstant'; - - onContinue(wantParam : {[key: string]: any}) { - console.info(`onContinue version = ${wantParam.version}, targetDevice: ${wantParam.targetDevice}`) - let workInput = AppStorage.Get('ContinueWork'); - // Set the user input data into wantParam. - wantParam["work"] = workInput // set user input data into want params - console.info(`onContinue input = ${wantParam["input"]}`); - return AbilityConstant.OnContinueResult.AGREE - } - ``` + The sample code is as follows: + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + import AbilityConstant from '@ohos.app.ability.AbilityConstant'; + + onContinue(wantParam : {[key: string]: any}) { + console.info(`onContinue version = ${wantParam.version}, targetDevice: ${wantParam.targetDevice}`) + let workInput = AppStorage.Get('ContinueWork'); + // Set the user input data into wantParam. + wantParam["work"] = workInput // set user input data into want params + console.info(`onContinue input = ${wantParam["input"]}`); + return AbilityConstant.OnContinueResult.AGREE + } + ``` 5. Implement **onCreate()** and **onNewWant()** in the UIAbility of the target application to implement data restoration. 
- Implementation example of **onCreate** in the multi-instance scenario diff --git a/en/application-dev/application-models/hop-multi-device-collaboration.md b/en/application-dev/application-models/hop-multi-device-collaboration.md index f5d82af32da86796d81dc1bebed1d6ff804f2532..b761037182f27367e9c01488de41aaa23b6b25d2 100644 --- a/en/application-dev/application-models/hop-multi-device-collaboration.md +++ b/en/application-dev/application-models/hop-multi-device-collaboration.md @@ -5,13 +5,13 @@ Multi-device coordination involves the following scenarios: -- [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned) +- [Starting UIAbility or ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-or-serviceextensionability-across-devices-no-data-returned) - [Starting UIAbility Across Devices (Data Returned)](#starting-uiability-across-devices-data-returned) - [Connecting to ServiceExtensionAbility Across Devices](#connecting-to-serviceextensionability-across-devices) -- [Using Cross-Device Ability Call](#using-cross-device-ability-call) +- [Using Cross-Device Call](#using-cross-device-call) ## Multi-Device Collaboration Process @@ -31,9 +31,9 @@ The figure below shows the multi-device collaboration process. - For better user experience, you are advised to use the **want** parameter to transmit data smaller than 100 KB. -## Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned) +## Starting UIAbility or ServiceExtensionAbility Across Devices (No Data Returned) -On device A, touch the **Start** button provided by the initiator application to start a specified UIAbility on device B. +On device A, touch the **Start** button provided by the initiator application to start a specified UIAbility or ServiceExtensionAbility on device B. 
### Available APIs @@ -42,29 +42,19 @@ On device A, touch the **Start** button provided by the initiator application to | **API**| **Description**| | -------- | -------- | -| startAbility(want: Want, callback: AsyncCallback<void>): void; | Starts UIAbility and ServiceExtensionAbility. This API uses an asynchronous callback to return the result.| +| startAbility(want: Want, callback: AsyncCallback<void>): void; | Starts a UIAbility or ServiceExtensionAbility. This API uses an asynchronous callback to return the result.| +| stopServiceExtensionAbility(want: Want, callback: AsyncCallback<void>): void; | Stops a ServiceExtensionAbility. This API uses an asynchronous callback to return the result.| +| stopServiceExtensionAbility(want: Want): Promise<void>; | Stops a ServiceExtensionAbility. This API uses a promise to return the result.| ### How to Develop -1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). +1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). -2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows: - - ```ts - requestPermission() { - let context = this.context; - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.info("Succeed to request permission from user with data: "+ JSON.stringify(data)); - }).catch((error) => { - console.info("Failed to request permission from user with error: "+ JSON.stringify(error)); - }) - } - ``` +2. Display a dialog box to ask authorization from the user when the application is started for the first time. 
For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). 3. Obtain the device ID of the target device. - + ```ts import deviceManager from '@ohos.distributedHardware.deviceManager'; @@ -93,8 +83,8 @@ On device A, touch the **Start** button provided by the initiator application to } ``` -4. Set the target component parameters, and call [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start UIAbility or ServiceExtensionAbility. - +4. Set the target component parameters, and call [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start a UIAbility or ServiceExtensionAbility. + ```ts let want = { deviceId: getRemoteDeviceId(), @@ -102,7 +92,7 @@ On device A, touch the **Start** button provided by the initiator application to abilityName: 'FuncAbility', moduleName: 'module1', // moduleName is optional. } - // context is the ability-level context of the initiator UIAbility. + // context is the AbilityContext of the initiator UIAbility. this.context.startAbility(want).then(() => { // ... }).catch((err) => { @@ -110,6 +100,22 @@ On device A, touch the **Start** button provided by the initiator application to }) ``` +5. Call stopServiceExtensionAbility(../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstopserviceextensionability) to stop the ServiceExtensionAbility when it is no longer required on device B. (This API cannot be used to stop a UIAbility. Users must manually stop a UIAbility through task management.) + + ```ts + let want = { + deviceId: getRemoteDeviceId(), + bundleName: 'com.example.myapplication', + abilityName: 'FuncAbility', + moduleName: 'module1', // moduleName is optional. + } + // Stop the ServiceExtensionAbility started by calling startAbility(). 
+ this.context.stopServiceExtensionAbility(want).then(() => { + console.info("stop service extension ability success") + }).catch((err) => { + console.info("stop service extension ability err is " + JSON.stringify(err)) + }) + ``` ## Starting UIAbility Across Devices (Data Returned) @@ -118,35 +124,23 @@ On device A, touch the **Start** button provided by the initiator application to ### Available APIs -**Table 2** APIs for starting an ability across devices and returning the result data +**Table 2** APIs for starting a UIAbility across devices and returning the result data | API| Description| | -------- | -------- | | startAbilityForResult(want: Want, callback: AsyncCallback<AbilityResult>): void; | Starts a UIAbility. This API uses an asynchronous callback to return the result when the UIAbility is terminated.| -| terminateSelfWithResult(parameter: AbilityResult, callback: AsyncCallback<void>): void;| Terminates this UIAbility. This API uses an asynchronous callback to return the ability result information. It is used together with **startAbilityForResult**.| -| terminateSelfWithResult(parameter: AbilityResult): Promise<void>; | Terminates this UIAbility. This API uses a promise to return the ability result information. It is used together with **startAbilityForResult**.| +| terminateSelfWithResult(parameter: AbilityResult, callback: AsyncCallback<void>): void;| Terminates this UIAbility. This API uses an asynchronous callback to return the result information. It is used together with **startAbilityForResult**.| +| terminateSelfWithResult(parameter: AbilityResult): Promise<void>; | Terminates this UIAbility. This API uses a promise to return the result information. It is used together with **startAbilityForResult**.| ### How to Develop -1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). +1. 
Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). -2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows: - - ```ts - requestPermission() { - let context = this.context; - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.info("Succeed to request permission from user with data: "+ JSON.stringify(data)); - }).catch((error) => { - console.info("Failed to request permission from user with error: "+ JSON.stringify(error)); - }) - } - ``` +2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). + +3. Set the target component parameters on the initiator, and call **startAbilityForResult()** to start the target UIAbility. **data** in the asynchronous callback is used to receive the information returned by the target UIAbility to the initiator UIAbility after the target UIAbility terminates itself. For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility or ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-or-serviceextensionability-across-devices-no-data-returned). -3. Set the target component parameters on the initiator, and call **startAbilityForResult()** to start the target UIAbility. **data** in the asynchronous callback is used to receive the information returned by the target UIAbility to the initiator UIAbility after the target UIAbility terminates itself. 
For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned). - ```ts let want = { deviceId: getRemoteDeviceId(), @@ -154,7 +148,7 @@ On device A, touch the **Start** button provided by the initiator application to abilityName: 'FuncAbility', moduleName: 'module1', // moduleName is optional. } - // context is the ability-level context of the initiator UIAbility. + // context is the AbilityContext of the initiator UIAbility. this.context.startAbilityForResult(want).then((data) => { // ... }).catch((err) => { @@ -163,7 +157,7 @@ On device A, touch the **Start** button provided by the initiator application to ``` 4. After the UIAbility task at the target device is complete, call **terminateSelfWithResult()** to return the data to the initiator UIAbility. - + ```ts const RESULT_CODE: number = 1001; let abilityResult = { @@ -174,20 +168,20 @@ On device A, touch the **Start** button provided by the initiator application to moduleName: 'module1', }, } - // context is the ability-level context of the target UIAbility. + // context is the AbilityContext of the target UIAbility. this.context.terminateSelfWithResult(abilityResult, (err) => { // ... }); ``` 5. The initiator UIAbility receives the information returned by the target UIAbility and processes the information. - + ```ts const RESULT_CODE: number = 1001; // ... - // context is the ability-level context of the initiator UIAbility. + // context is the UIAbilityContext of the initiator UIAbility. this.context.startAbilityForResult(want).then((data) => { if (data?.resultCode === RESULT_CODE) { // Parse the information returned by the target UIAbility. @@ -218,21 +212,9 @@ A system application can connect to a service on another device by calling [conn ### How to Develop -1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. 
For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - -2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows: - - ```ts - requestPermission() { - let context = this.context; - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.info("Succeed to request permission from user with data: "+ JSON.stringify(data)); - }).catch((error) => { - console.info("Failed to request permission from user with error: "+ JSON.stringify(error)); - }) - } - ``` +1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + +2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). 3. (Optional) [Implement a background service](serviceextensionability.md#implementing-a-background-service). Perform this operation only if no background service is available. @@ -289,10 +271,10 @@ A system application can connect to a service on another device by calling [conn let connectionId = this.context.connectServiceExtensionAbility(want, options); ``` - For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned). + For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility or ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-or-serviceextensionability-across-devices-no-data-returned). 5. 
Disconnect the connection. Use **disconnectServiceExtensionAbility()** to disconnect from the background service. - + ```ts let connectionId = 1 // ID returned when the service is connected through connectServiceExtensionAbility. this.context.disconnectServiceExtensionAbility(connectionId).then((data) => { @@ -303,145 +285,134 @@ A system application can connect to a service on another device by calling [conn ``` -## Using Cross-Device Ability Call +## Using Cross-Device Call -The basic principle of cross-device ability call is the same as that of intra-device ability call. For details, see [Using Ability Call to Implement UIAbility Interaction (for System Applications Only)](uiability-intra-device-interaction.md#using-ability-call-to-implement-uiability-interaction-for-system-applications-only). +The basic principle of cross-device call is the same as that of intra-device call. For details, see [Using Call to Implement UIAbility Interaction (for System Applications Only)](uiability-intra-device-interaction.md#using-call-to-implement-uiability-interaction-for-system-applications-only). -The following describes how to implement multi-device collaboration through cross-device ability call. +The following describes how to implement multi-device collaboration through cross-device call. 
### Available APIs -**Table 4** Ability call APIs +**Table 4** Call APIs | API| Description| | -------- | -------- | | startAbilityByCall(want: Want): Promise<Caller>; | Starts a UIAbility in the foreground or background and obtains the caller object for communicating with the UIAbility.| -| on(method: string, callback: CalleeCallBack): void | Callback invoked when the callee ability registers a method.| -| off(method: string): void | Callback invoked when the callee ability deregisters a method.| -| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the callee ability.| -| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence>| Sends agreed parcelable data to the callee ability and obtains the agreed parcelable data returned by the callee ability.| +| on(method: string, callback: CalleeCallBack): void | Callback invoked when the CalleeAbility registers a method.| +| off(method: string): void | Callback invoked when the CalleeAbility deregisters a method.| +| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the CalleeAbility.| +| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence>| Sends agreed parcelable data to the CalleeAbility and obtains the agreed parcelable data returned by the CalleeAbility.| | release(): void | Releases the caller object.| | on(type: "release", callback: OnReleaseCallback): void | Callback invoked when the caller object is released.| ### How to Develop -1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - -2. Request the data synchronization permission. 
The sample code for displaying a dialog box to request the permission is as follows: - - ```ts - requestPermission() { - let context = this.context; - let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.info("Succeed to request permission from user with data: "+ JSON.stringify(data)); - }).catch((error) => { - console.info("Failed to request permission from user with error: "+ JSON.stringify(error)); - }) - } - ``` +1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). -3. Create the callee ability. +2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). + +3. Create the CalleeAbility. - For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener. + For the CalleeAbility, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener. - 1. Configure the launch type of the UIAbility. - Set **launchType** of the callee ability to **singleton** in the **module.json5** file. + 1. Configure the launch type of the UIAbility. - | JSON Field| Description| - | -------- | -------- | - | "launchType"| Ability launch type. Set this parameter to **singleton**.| + Set **launchType** of the CalleeAbility to **singleton** in the **module.json5** file. 
- An example of the UIAbility configuration is as follows: + | JSON Field| Description| + | -------- | -------- | + | "launchType"| UIAbility launch type. Set this parameter to **singleton**.| - - ```json - "abilities":[{ - "name": ".CalleeAbility", - "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts", - "launchType": "singleton", - "description": "$string:CalleeAbility_desc", - "icon": "$media:icon", - "label": "$string:CalleeAbility_label", - "visible": true - }] - ``` - 2. Import the **UIAbility** module. - - ```ts - import Ability from '@ohos.app.ability.UIAbility'; - ``` - 3. Define the agreed parcelable data. - - The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. - - - ```ts - export default class MyParcelable { - num: number = 0; - str: string = ""; - - constructor(num, string) { - this.num = num; - this.str = string; - } - - marshalling(messageSequence) { - messageSequence.writeInt(this.num); - messageSequence.writeString(this.str); - return true; - } - - unmarshalling(messageSequence) { - this.num = messageSequence.readInt(); - this.str = messageSequence.readString(); - return true; - } - } - ``` - 4. Implement **Callee.on** and **Callee.off**. + An example of the UIAbility configuration is as follows: - In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate()** of the ability and deregistered in **onDestroy()**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. - ```ts - const TAG: string = '[CalleeAbility]'; - const MSG_SEND_METHOD: string = 'CallSendMsg'; - - function sendMsgCallback(data) { - console.info('CalleeSortFunc called'); - - // Obtain the parcelable data sent by the caller ability. 
- let receivedData = new MyParcelable(0, ''); - data.readParcelable(receivedData); - console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`); - - // Process the data. - // Return the parcelable data result to the caller ability. - return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`); - } - - export default class CalleeAbility extends Ability { - onCreate(want, launchParam) { - try { - this.callee.on(MSG_SEND_METHOD, sendMsgCallback); - } catch (error) { - console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`); - } - } - - onDestroy() { - try { - this.callee.off(MSG_SEND_METHOD); - } catch (error) { - console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`); - } - } - } - ``` + ```json + "abilities":[{ + "name": ".CalleeAbility", + "srcEntry": "./ets/CalleeAbility/CalleeAbility.ts", + "launchType": "singleton", + "description": "$string:CalleeAbility_desc", + "icon": "$media:icon", + "label": "$string:CalleeAbility_label", + "exported": true + }] + ``` + 2. Import the **UIAbility** module. + + ```ts + import Ability from '@ohos.app.ability.UIAbility'; + ``` + 3. Define the agreed parcelable data. + + The data formats sent and received by the CallerAbility and CalleeAbility must be consistent. In the following example, the data formats are number and string. -4. Obtain the caller object and access the callee ability. + + ```ts + export default class MyParcelable { + num: number = 0; + str: string = ""; + + constructor(num, string) { + this.num = num; + this.str = string; + } + + marshalling(messageSequence) { + messageSequence.writeInt(this.num); + messageSequence.writeString(this.str); + return true; + } + + unmarshalling(messageSequence) { + this.num = messageSequence.readInt(); + this.str = messageSequence.readString(); + return true; + } + } + ``` + 4. Implement **Callee.on** and **Callee.off**. 
+ + In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate()** of the UIAbility and deregistered in **onDestroy()**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. + + ```ts + const TAG: string = '[CalleeAbility]'; + const MSG_SEND_METHOD: string = 'CallSendMsg'; + + function sendMsgCallback(data) { + console.info('CalleeSortFunc called'); + + // Obtain the parcelable data sent by the CallerAbility. + let receivedData = new MyParcelable(0, ''); + data.readParcelable(receivedData); + console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`); + + // Process the data. + // Return the parcelable data result to the CallerAbility. + return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`); + } + + export default class CalleeAbility extends Ability { + onCreate(want, launchParam) { + try { + this.callee.on(MSG_SEND_METHOD, sendMsgCallback); + } catch (error) { + console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`); + } + } + + onDestroy() { + try { + this.callee.off(MSG_SEND_METHOD); + } catch (error) { + console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`); + } + } + } + ``` + +4. Obtain the caller object and access the CalleeAbility. 1. Import the **UIAbility** module. ```ts @@ -449,7 +420,7 @@ The following describes how to implement multi-device collaboration through cros ``` 2. Obtain the caller object. - The **context** attribute of the ability implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the caller object, and register the **onRelease** listener of the caller ability. 
You need to implement processing based on service requirements. + The **context** attribute of the UIAbility implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the UIAbility, uses **startAbilityByCall** to start the CalleeAbility, obtain the caller object, and register the **onRelease** and **onRemoteStateChange** listeners of the CallerAbility. You need to implement processing based on service requirements. ```ts @@ -465,11 +436,19 @@ The following describes how to implement multi-device collaboration through cros if (data != null) { caller = data; console.info('get remote caller success'); - // Register the onRelease() listener of the caller ability. + // Register the onRelease listener of the CallerAbility. caller.onRelease((msg) => { console.info(`remote caller onRelease is called ${msg}`); }) console.info('remote caller register OnRelease succeed'); + // Register the onRemoteStateChange listener of the CallerAbility. + try { + caller.onRemoteStateChange((str) => { + console.log('Remote state changed ' + str); + }); + } catch (error) { + console.log('Caller.onRemoteStateChange catch error, error.code: ${JSON.stringify(error.code)}, error.message: ${JSON.stringify(error.message)}'); + } } }).catch((error) => { console.error(`get remote caller failed with ${error}`); @@ -477,10 +456,10 @@ The following describes how to implement multi-device collaboration through cros } ``` - For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned). + For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility or ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-or-serviceextensionability-across-devices-no-data-returned). -5. 
Sends agreed parcelable data to the callee ability. - 1. The parcelable data can be sent to the callee ability with or without a return value. The method and parcelable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability. +5. Sends agreed parcelable data to the CalleeAbility. + 1. The parcelable data can be sent to the CalleeAbility with or without a return value. The method and parcelable data must be consistent with those of the CalleeAbility. The following example describes how to send data to the CalleeAbility. ```ts const MSG_SEND_METHOD: string = 'CallSendMsg'; @@ -493,7 +472,7 @@ The following describes how to implement multi-device collaboration through cros } } ``` - 2. In the following, **CallWithResult** is used to send data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**. + 2. In the following, **CallWithResult** is used to send data **originMsg** to the CalleeAbility and assign the data processed by the **CallSendMsg** method to **backMsg**. ```ts const MSG_SEND_METHOD: string = 'CallSendMsg'; @@ -517,8 +496,8 @@ The following describes how to implement multi-device collaboration through cros 6. Release the caller object. - When the caller object is no longer required, use **release()** to release it. - + When the caller object is no longer required, use **release()** to release it. 
+ ```ts releaseCall() { try { diff --git a/en/application-dev/application-models/inputmethodextentionability.md b/en/application-dev/application-models/inputmethodextentionability.md index 9a025339cab6a5f555fd61b15597400b31affeb7..8a7856f402bf30b1610521e3cf05dda7145c3509 100644 --- a/en/application-dev/application-models/inputmethodextentionability.md +++ b/en/application-dev/application-models/inputmethodextentionability.md @@ -33,7 +33,7 @@ In the **ets** directory of the target module, right-click and choose **New** > > **NOTE** > -> When compiling the input method application, use the signature at the system_core level. Otherwise, the application will not be able to start the keyboard. +> When compiling the input method application, use the signature at the system_basic level. Otherwise, the application will not be able to start the keyboard. The minimum template implements an input method application with the most basic features, such as starting the keyboard, entering text, and deleting input. You can diversify the feature set of the application by, for example, adding the feature to hide the keyboard. diff --git a/en/application-dev/application-models/js-ui-widget-development.md b/en/application-dev/application-models/js-ui-widget-development.md new file mode 100644 index 0000000000000000000000000000000000000000..cb8a2287992a55fb960672b078e6d0d20f6ec1b1 --- /dev/null +++ b/en/application-dev/application-models/js-ui-widget-development.md @@ -0,0 +1,590 @@ +# Developing a JS Widget + + +The following describes how to develop JS widgets based on the web-like development paradigm. + + +## Working Principles + +Below shows the working principles of the widget framework. + +**Figure 1** Widget framework working principles in the stage model + +![JSCardPrinciple](figures/JSCardPrinciple.png) + +The widget host consists of the following modules: + +- Widget usage: provides operations such as creating, deleting, or updating a widget. 
+ +- Communication adapter: provided by the OpenHarmony SDK for communication with the Widget Manager. It sends widget-related operations to the Widget Manager. + +The Widget Manager consists of the following modules: + +- Periodic updater: starts a scheduled task based on the update policy to periodically update a widget after it is added to the Widget Manager. + +- Cache manager: caches view information of a widget after it is added to the Widget Manager to directly return the cached data when the widget is obtained next time. This reduces the latency greatly. + +- Lifecycle manager: suspends update when a widget is switched to the background or is blocked, and updates and/or clears widget data during upgrade and deletion. + +- Object manager: manages RPC objects of the widget host. It is used to verify requests from the widget host and process callbacks after the widget update. + +- Communication adapter: communicates with the widget host and provider through RPCs. + +The widget provider consists of the following modules: + +- Widget service: implemented by the widget provider developer to process requests on widget creation, update, and deletion, and to provide corresponding widget services. + +- Instance manager: implemented by the widget provider developer for persistent management of widget instances allocated by the Widget Manager. + +- Communication adapter: provided by the OpenHarmony SDK for communication with the Widget Manager. It pushes update data to the Widget Manager. + +> **NOTE** +> You only need to develop the widget provider. The system automatically handles the work of the widget host and Widget Manager. + + +## Available APIs + +The **FormExtensionAbility** class has the following APIs. For details, see [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md). 
+ +| Name| Description| +| -------- | -------- | +| onAddForm(want: Want): formBindingData.FormBindingData | Called to notify the widget provider that a widget has been created.| +| onCastToNormalForm(formId: string): void | Called to notify the widget provider that a temporary widget has been converted to a normal one.| +| onUpdateForm(formId: string): void | Called to notify the widget provider that a widget has been updated.| +| onChangeFormVisibility(newStatus: { [key: string]: number }): void | Called to notify the widget provider of the change in widget visibility.| +| onFormEvent(formId: string, message: string): void | Called to instruct the widget provider to receive and process a widget event.| +| onRemoveForm(formId: string): void | Called to notify the widget provider that a widget has been destroyed.| +| onConfigurationUpdate(config: Configuration): void | Called when the configuration of the environment where the widget is running is updated.| +| onShareForm?(formId: string): { [key: string]: any } | Called by the widget provider to receive shared widget data.| + +The **FormProvider** class has the following APIs. For details, see [FormProvider](../reference/apis/js-apis-app-form-formProvider.md). + +| Name| Description| +| -------- | -------- | +| setFormNextRefreshTime(formId: string, minute: number, callback: AsyncCallback<void>): void; | Sets the next refresh time for a widget. This API uses an asynchronous callback to return the result.| +| setFormNextRefreshTime(formId: string, minute: number): Promise<void>; | Sets the next refresh time for a widget. This API uses a promise to return the result.| +| updateForm(formId: string, formBindingData: FormBindingData, callback: AsyncCallback<void>): void; | Updates a widget. This API uses an asynchronous callback to return the result.| +| updateForm(formId: string, formBindingData: FormBindingData): Promise<void>; | Updates a widget. 
This API uses a promise to return the result.| + +The **FormBindingData** class has the following APIs. For details, see [FormBindingData](../reference/apis/js-apis-app-form-formBindingData.md). + +| Name| Description| +| -------- | -------- | +| createFormBindingData(obj?: Object \| string): FormBindingData | Creates a **FormBindingData** object.| + + +## How to Develop + +The widget provider development based on the [stage model](stage-model-development-overview.md) involves the following key steps: + +- [Creating a FormExtensionAbility Instance](#creating-a-formextensionability-instance): Develop the lifecycle callback functions of FormExtensionAbility. + +- [Configuring the Widget Configuration Files](#configuring-the-widget-configuration-files): Configure the application configuration file **module.json5** and profile configuration file. + +- [Persistently Storing Widget Data](#persistently-storing-widget-data): This operation is a form of widget data exchange. + +- [Updating Widget Data](#updating-widget-data): Call **updateForm()** to update the information displayed on a widget. + +- [Developing the Widget UI Page](#developing-the-widget-ui-page): Use HML+CSS+JSON to develop a JS widget UI page. + +- [Developing Widget Events](#developing-widget-events): Add the router and message events for a widget. + + +### Creating a FormExtensionAbility Instance + +To create a widget in the stage model, implement the lifecycle callbacks of **FormExtensionAbility**. Generate a widget template by referring to [Developing a Service Widget](https://developer.harmonyos.com/en/docs/documentation/doc-guides/ohos-development-service-widget-0000001263280425). + +1. Import related modules to **EntryFormAbility.ts**. 
+ + + ```ts + import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility'; + import formBindingData from '@ohos.app.form.formBindingData'; + import formInfo from '@ohos.app.form.formInfo'; + import formProvider from '@ohos.app.form.formProvider'; + import dataStorage from '@ohos.data.storage'; + ``` + +2. Implement the FormExtension lifecycle callbacks in **EntryFormAbility.ts**. + + + ```ts + export default class EntryFormAbility extends FormExtensionAbility { + onAddForm(want) { + console.info('[EntryFormAbility] onAddForm'); + // Called when the widget is created. The widget provider should return the widget data binding class. + let obj = { + "title": "titleOnCreate", + "detail": "detailOnCreate" + }; + let formData = formBindingData.createFormBindingData(obj); + return formData; + } + onCastToNormalForm(formId) { + // Called when the widget host converts the temporary widget into a normal one. The widget provider should do something to respond to the conversion. + console.info('[EntryFormAbility] onCastToNormalForm'); + } + onUpdateForm(formId) { + // Override this method to support scheduled updates, periodic updates, or updates requested by the widget host. + console.info('[EntryFormAbility] onUpdateForm'); + let obj = { + "title": "titleOnUpdate", + "detail": "detailOnUpdate" + }; + let formData = formBindingData.createFormBindingData(obj); + formProvider.updateForm(formId, formData).catch((error) => { + console.info('[EntryFormAbility] updateForm, error:' + JSON.stringify(error)); + }); + } + onChangeFormVisibility(newStatus) { + // Called when the widget host initiates an event about visibility changes. The widget provider should do something to respond to the notification. This callback takes effect only for system applications. + console.info('[EntryFormAbility] onChangeFormVisibility'); + } + onFormEvent(formId, message) { + // If the widget supports event triggering, override this method and implement the trigger. 
+ console.info('[EntryFormAbility] onFormEvent'); + } + onRemoveForm(formId) { + // Delete widget data. + console.info('[EntryFormAbility] onRemoveForm'); + } + onConfigurationUpdate(config) { + console.info('[EntryFormAbility] nConfigurationUpdate, config:' + JSON.stringify(config)); + } + onAcquireFormState(want) { + return formInfo.FormState.READY; + } + } + ``` + +> **NOTE** +> FormExtensionAbility cannot reside in the background. Therefore, continuous tasks cannot be processed in the widget lifecycle callbacks. + + +### Configuring the Widget Configuration Files + +1. Configure ExtensionAbility information under **extensionAbilities** in the [module.json5 file](../quick-start/module-configuration-file.md). For a FormExtensionAbility, you must specify **metadata**. Specifically, set **name** to **ohos.extension.form** (fixed), and set **resource** to the index of the widget configuration information. + Example configuration: + + + ```json + { + "module": { + ... + "extensionAbilities": [ + { + "name": "EntryFormAbility", + "srcEntry": "./ets/entryformability/EntryFormAbility.ts", + "label": "$string:EntryFormAbility_label", + "description": "$string:EntryFormAbility_desc", + "type": "form", + "metadata": [ + { + "name": "ohos.extension.form", + "resource": "$profile:form_config" + } + ] + } + ] + } + } + ``` + +2. Configure the widget configuration information. In the **metadata** configuration item of FormExtensionAbility, you can specify the resource index of specific configuration information of the widget. For example, if resource is set to **$profile:form_config**, **form_config.json** in the **resources/base/profile/** directory of the development view is used as the profile configuration file of the widget. The following table describes the internal field structure. 
+ + **Table 1** Widget profile configuration file + + | Field| Description| Data Type| Default Value Allowed| + | -------- | -------- | -------- | -------- | + | name | Class name of the widget. The value is a string with a maximum of 127 bytes.| String| No| + | description | Description of the widget. The value can be a string or a resource index to descriptions in multiple languages. The value is a string with a maximum of 255 bytes.| String| Yes (initial value: left empty)| + | src | Full path of the UI code corresponding to the widget.| String| No| + | window | Window-related configurations.| Object| Yes| + | isDefault | Whether the widget is a default one. Each UIAbility has only one default widget.
- **true**: The widget is the default one.
- **false**: The widget is not the default one.| Boolean| No| + | colorMode | Color mode of the widget.
- **auto**: auto-adaptive color mode
- **dark**: dark color mode
- **light**: light color mode| String| Yes (initial value: **auto**)| + | supportDimensions | Grid styles supported by the widget.
- **1 * 2**: indicates a grid with one row and two columns.
- **2 * 2**: indicates a grid with two rows and two columns.
- **2 * 4**: indicates a grid with two rows and four columns.
- **4 * 4**: indicates a grid with four rows and four columns.| String array| No| + | defaultDimension | Default grid style of the widget. The value must be available in the **supportDimensions** array of the widget.| String| No| + | updateEnabled | Whether the widget can be updated periodically.
- **true**: The widget can be updated at a specified interval (**updateDuration**) or at the scheduled time (**scheduledUpdateTime**). **updateDuration** takes precedence over **scheduledUpdateTime**.
- **false**: The widget cannot be updated periodically.| Boolean| No| + | scheduledUpdateTime | Scheduled time to update the widget. The value is in 24-hour format and accurate to minute.
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| String| Yes (initial value: **0:0**)| + | updateDuration | Interval to update the widget. The value is a natural number, in the unit of 30 minutes.
If the value is **0**, this field does not take effect.
If the value is a positive integer *N*, the interval is calculated by multiplying *N* and 30 minutes.
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| Number| Yes (initial value: **0**)| + | formConfigAbility | Link to a specific page of the application. The value is a URI.| String| Yes (initial value: left empty)| + | formVisibleNotify | Whether the widget is allowed to use the widget visibility notification.| String| Yes (initial value: left empty)| + | metaData | Metadata of the widget. This field contains the array of the **customizeData** field.| Object| Yes (initial value: left empty)| + + Example configuration: + + + ```json + { + "forms": [ + { + "name": "widget", + "description": "This is a service widget.", + "src": "./js/widget/pages/index/index", + "window": { + "designWidth": 720, + "autoDesignWidth": true + }, + "colorMode": "auto", + "isDefault": true, + "updateEnabled": true, + "scheduledUpdateTime": "10:30", + "updateDuration": 1, + "defaultDimension": "2*2", + "supportDimensions": [ + "2*2" + ] + } + ] + } + ``` + + +### Persistently Storing Widget Data + +A widget provider is usually started when it is needed to provide information about a widget. The Widget Manager supports multi-instance management and uses the widget ID to identify an instance. If the widget provider supports widget data modification, it must persistently store the data based on the widget ID, so that it can access the data of the target widget when obtaining, updating, or starting a widget. + + +```ts +const DATA_STORAGE_PATH = "/data/storage/el2/base/haps/form_store"; +async function storeFormInfo(formId: string, formName: string, tempFlag: boolean) { + // Only the widget ID (formId), widget name (formName), and whether the widget is a temporary one (tempFlag) are persistently stored. 
+ let formInfo = { + "formName": formName, + "tempFlag": tempFlag, + "updateCount": 0 + }; + try { + const storage = await dataStorage.getStorage(DATA_STORAGE_PATH); + // put form info + await storage.put(formId, JSON.stringify(formInfo)); + console.info(`[EntryFormAbility] storeFormInfo, put form info successfully, formId: ${formId}`); + await storage.flush(); + } catch (err) { + console.error(`[EntryFormAbility] failed to storeFormInfo, err: ${JSON.stringify(err)}`); + } +} + +export default class EntryFormAbility extends FormExtension { + ... + onAddForm(want) { + console.info('[EntryFormAbility] onAddForm'); + + let formId = want.parameters["ohos.extra.param.key.form_identity"]; + let formName = want.parameters["ohos.extra.param.key.form_name"]; + let tempFlag = want.parameters["ohos.extra.param.key.form_temporary"]; + // Persistently store widget data for subsequent use, such as instance acquisition and update. + // Implement this API based on project requirements. + storeFormInfo(formId, formName, tempFlag); + + let obj = { + "title": "titleOnCreate", + "detail": "detailOnCreate" + }; + let formData = formBindingData.createFormBindingData(obj); + return formData; + } +} +``` + +You should override **onRemoveForm** to implement widget data deletion. + + +```ts +const DATA_STORAGE_PATH = "/data/storage/el2/base/haps/form_store"; +async function deleteFormInfo(formId: string) { + try { + const storage = await dataStorage.getStorage(DATA_STORAGE_PATH); + // del form info + await storage.delete(formId); + console.info(`[EntryFormAbility] deleteFormInfo, del form info successfully, formId: ${formId}`); + await storage.flush(); + } catch (err) { + console.error(`[EntryFormAbility] failed to deleteFormInfo, err: ${JSON.stringify(err)}`); + } +} + +... + +export default class EntryFormAbility extends FormExtension { + ... + onRemoveForm(formId) { + console.info('[EntryFormAbility] onRemoveForm'); + // Delete the persistent widget instance data. 
+ // Implement this API based on project requirements. + deleteFormInfo(formId); + } +} +``` + +For details about how to implement persistent data storage, see [Persisting Preferences Data](../database/data-persistence-by-preferences.md). + +The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary. + +- Normal widget: a widget persistently used by the widget host + +- Temporary widget: a widget temporarily used by the widget host + +Data of a temporary widget will be deleted on the Widget Manager if the widget framework is killed and restarted. The widget provider, however, is not notified of the deletion and still keeps the data. Therefore, the widget provider needs to clear the data of temporary widgets proactively if the data has been kept for a long period of time. If the widget host has converted a temporary widget into a normal one, the widget provider should change the widget data from temporary storage to persistent storage. Otherwise, the widget data may be deleted by mistake. + + +### Updating Widget Data + +When an application initiates a scheduled or periodic update, the application obtains the latest data and calls **updateForm()** to update the widget. + + +```ts +onUpdateForm(formId) { + // Override this method to support scheduled updates, periodic updates, or updates requested by the widget host. + console.info('[EntryFormAbility] onUpdateForm'); + let obj = { + "title": "titleOnUpdate", + "detail": "detailOnUpdate" + }; + let formData = formBindingData.createFormBindingData(obj); + // Call the updateForm() method to update the widget. Only the data passed through the input parameter is updated. Other information remains unchanged. 
+ formProvider.updateForm(formId, formData).catch((error) => { + console.info('[EntryFormAbility] updateForm, error:' + JSON.stringify(error)); + }); +} +``` + + +### Developing the Widget UI Page + +You can use the web-like paradigm (HML+CSS+JSON) to develop JS widget pages. This section describes how to develop a page shown below. + +![WidgetCardPage](figures/WidgetCardPage.png) + +- HML: uses web-like paradigm components to describe the widget page information. + + + ```html +
+   <div class="container">
+     <stack>
+       <div class="container-img">
+         <image src="/common/widget.png" class="bg-img"></image>
+       </div>
+       <div class="container-inner">
+         <text class="title">{{title}}</text>
+         <text class="detail_text" onclick="routerEvent">{{detail}}</text>
+       </div>
+     </stack>
+   </div>
+ ``` + +- CSS: defines style information about the web-like paradigm components in HML. + + + ```css + .container { + flex-direction: column; + justify-content: center; + align-items: center; + } + + .bg-img { + flex-shrink: 0; + height: 100%; + } + + .container-inner { + flex-direction: column; + justify-content: flex-end; + align-items: flex-start; + height: 100%; + width: 100%; + padding: 12px; + } + + .title { + font-size: 19px; + font-weight: bold; + color: white; + text-overflow: ellipsis; + max-lines: 1; + } + + .detail_text { + font-size: 16px; + color: white; + opacity: 0.66; + text-overflow: ellipsis; + max-lines: 1; + margin-top: 6px; + } + ``` + +- JSON: defines data and event interaction on the widget UI page. + + + ```json + { + "data": { + "title": "TitleDefault", + "detail": "TextDefault" + }, + "actions": { + "routerEvent": { + "action": "router", + "abilityName": "EntryAbility", + "params": { + "message": "add detail" + } + } + } + } + ``` + + +### Developing Widget Events + +You can set router and message events for components on a widget. The router event applies to UIAbility redirection, and the message event applies to custom click events. + +The key steps are as follows: + +1. Set the **onclick** field in the HML file to **routerEvent** or **messageEvent**, depending on the **actions** settings in the JSON file. + +2. Set the router event. + + - **action**: **"router"**, which indicates a router event. + - **abilityName**: name of the UIAbility to redirect to (PageAbility component in the FA model and UIAbility component in the stage model). For example, the default UIAbility name of the stage model created by DevEco Studio is EntryAbility. + - **params**: custom parameters passed to the target UIAbility. Set them as required. The value can be obtained from **parameters** in **want** used for starting the target UIAbility. 
For example, in the lifecycle function **onCreate** of the main ability in the stage model, you can obtain **want** and its **parameters** field. + +3. Set the message event. + + - **action**: **"message"**, which indicates a message event. + - **params**: custom parameters of the message event. Set them as required. The value can be obtained from **message** in the widget lifecycle function **onFormEvent()**. + +The following is an example: + +- HML file: + + + ```html +
+   <div class="container">
+     <stack>
+       <div class="container-img">
+         <image src="/common/widget.png" class="bg-img"></image>
+       </div>
+       <div class="container-inner">
+         <text class="title" onclick="routerEvent">{{title}}</text>
+         <text class="detail_text" onclick="messageEvent">{{detail}}</text>
+       </div>
+     </stack>
+   </div>
+ ``` + +- CSS file: + + + ```css + .container { + flex-direction: column; + justify-content: center; + align-items: center; + } + + .bg-img { + flex-shrink: 0; + height: 100%; + } + + .container-inner { + flex-direction: column; + justify-content: flex-end; + align-items: flex-start; + height: 100%; + width: 100%; + padding: 12px; + } + + .title { + font-size: 19px; + font-weight: bold; + color: white; + text-overflow: ellipsis; + max-lines: 1; + } + + .detail_text { + font-size: 16px; + color: white; + opacity: 0.66; + text-overflow: ellipsis; + max-lines: 1; + margin-top: 6px; + } + ``` + +- JSON file: + + + ```json + { + "data": { + "title": "TitleDefault", + "detail": "TextDefault" + }, + "actions": { + "routerEvent": { + "action": "router", + "abilityName": "EntryAbility", + "params": { + "info": "router info", + "message": "router message" + } + }, + "messageEvent": { + "action": "message", + "params": { + "detail": "message detail" + } + } + } + } + ``` + +- Receive the router event and obtain parameters in UIAbility. + + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility' + + export default class EntryAbility extends UIAbility { + onCreate(want, launchParam) { + let params = JSON.parse(want.parameters.params); + // Obtain the info parameter passed in the router event. + if (params.info === "router info") { + // do something + // console.info("router info:" + params.info) + } + // Obtain the message parameter passed in the router event. + if (params.message === "router message") { + // do something + // console.info("router message:" + params.message) + } + } + ... + }; + ``` + +- Receive the message event in FormExtensionAbility and obtain parameters. + + + ```ts + import FormExtension from '@ohos.app.form.FormExtensionAbility'; + + export default class FormAbility extends FormExtension { + ... + onFormEvent(formId, message) { + // Obtain the detail parameter passed in the message event. 
+ let msg = JSON.parse(message) + if (msg.detail === "message detail") { + // do something + // console.info("message info:" + msg.detail) + } + } + ... + }; + ``` diff --git a/en/application-dev/application-models/lifecycleapp-switch.md b/en/application-dev/application-models/lifecycleapp-switch.md index 892a8915bfed9927c2707364bdaffa1547f71bf6..9d89597ef5a77246ec7450261916061062d24d8d 100644 --- a/en/application-dev/application-models/lifecycleapp-switch.md +++ b/en/application-dev/application-models/lifecycleapp-switch.md @@ -1,17 +1,17 @@ # LifecycleApp Switching - | API in the FA Model| Corresponding d.ts File in the Stage Model| Corresponding API in the Stage Model| +| API in the FA Model| Corresponding d.ts File in the Stage Model| Corresponding API in the Stage Model| | -------- | -------- | -------- | | onShow?(): void; | \@ohos.window.d.ts | [on(eventType: 'windowStageEvent', callback: Callback<WindowStageEventType>): void;](../reference/apis/js-apis-window.md#onwindowstageevent9)
Listens for the switching to the [foreground](../reference/apis/js-apis-window.md#windowstageeventtype9).| | onHide?(): void; | \@ohos.window.d.ts | [on(eventType: 'windowStageEvent', callback: Callback<WindowStageEventType>): void;](../reference/apis/js-apis-window.md#onwindowstageevent9)
Listens for the switching to the [background](../reference/apis/js-apis-window.md#windowstageeventtype9).| -| onDestroy?(): void; | \@ohos.app.ability.UIAbility.d.ts | [onDestroy(): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityondestroy) | +| onDestroy?(): void; | \@ohos.app.ability.UIAbility.d.ts | [onDestroy(): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityondestroy) | | onCreate?(): void; | \@ohos.app.ability.UIAbility.d.ts | [onCreate(want: Want, param: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncreate) | | onWindowDisplayModeChanged?(isShownInMultiWindow: boolean, newConfig: resourceManager.Configuration): void; | There is no corresponding API in the stage model.| No corresponding API is provided.| | onStartContinuation?(): boolean; | There is no corresponding API in the stage model.| In the stage model, an application does not need to detect whether the continuation is successful (detected when the application initiates the continuation request). Therefore, the **onStartContinuation()** callback is deprecated.| | onSaveData?(data: Object): boolean; | \@ohos.app.ability.UIAbility.d.ts | [onContinue(wantParam : {[key: string]: any}): AbilityConstant.OnContinueResult;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncontinue) | | onCompleteContinuation?(result: number): void; | application\ContinueCallback.d.ts | [onContinueDone(result: number): void;](../reference/apis/js-apis-distributedMissionManager.md#continuecallback) | -| onRestoreData?(data: Object): void; | \@ohos.app.ability.UIAbility.d.ts | [onCreate(want: Want, param: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncreate)
[onNewWant(want: Want, launchParams: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant)
In standard or singleton mode, the target ability completes data restoration in the **onCreate()** callback. In the callback, **launchParam.launchReason** is used to determine whether it is a continuation-based launch scenario. If it is, the data saved before continuation can be obtained from the **want** parameter.| +| onRestoreData?(data: Object): void; | \@ohos.app.ability.UIAbility.d.ts | [onCreate(want: Want, param: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncreate)
[onNewWant(want: Want, launchParams: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant)
In multiton or singleton mode, the target ability completes data restoration in the **onCreate()** callback. In the callback, **launchParam.launchReason** is used to determine whether it is a continuation-based launch scenario. If it is, the data saved before continuation can be obtained from the **want** parameter.| | onRemoteTerminated?(): void; | application\ContinueCallback.d.ts | [onContinueDone(result: number): void;](../reference/apis/js-apis-distributedMissionManager.md#continuecallback) | | onSaveAbilityState?(outState: PacMap): void; | \@ohos.app.ability.UIAbility.d.ts | [onSaveState(reason: AbilityConstant.StateType, wantParam : {[key: string]: any}): AbilityConstant.OnSaveResult;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonsavestate) | | onRestoreAbilityState?(inState: PacMap): void; | \@ohos.app.ability.UIAbility.d.ts | [onCreate(want: Want, param: AbilityConstant.LaunchParam): void;](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncreate)
After the application is restarted, the **onCreate()** callback is triggered. In the callback, **launchParam.launchReason** is used to determine whether it is a self-recovery scenario. If it is, the data saved before the restart can be obtained from the **want** parameter.| diff --git a/en/application-dev/application-models/mission-management-launch-type.md b/en/application-dev/application-models/mission-management-launch-type.md index 267ed5011fe28cdc576e6caca85a526450110867..199de6eefead9fc056adf8d08c49f792a54a4a83 100644 --- a/en/application-dev/application-models/mission-management-launch-type.md +++ b/en/application-dev/application-models/mission-management-launch-type.md @@ -8,16 +8,19 @@ The following describes how the mission list manager manages the UIAbility insta - **singleton**: Only one UIAbility instance exists for an application. **Figure 1** Missions and singleton mode + ![mission-and-singleton](figures/mission-and-singleton.png) -- **standard**: Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, a **UIAbility** instance is created in the application process. +- **multiton**: Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, a **UIAbility** instance is created in the application process. + + **Figure 2** Missions and multiton mode - **Figure 2** Missions and standard mode - ![mission-and-standard](figures/mission-and-standard.png) + ![mission-and-multiton](figures/mission-and-multiton.png) -- **specified**: The ([onAcceptWant](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant)) method of [AbilityStage](abilitystage.md) determines whether to create an instance. +- **specified**: The ([onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant)) method of [AbilityStage](abilitystage.md) determines whether to create an instance. 
**Figure 3** Missions and specified mode + ![mission-and-specified](figures/mission-and-specified.png) diff --git a/en/application-dev/application-models/mission-management-overview.md b/en/application-dev/application-models/mission-management-overview.md index 3346e8105deef0dce6dc785b7e88b10e2a4ce3e1..ba55ebb136ebffca0294bf69013f2f2ab4392e7f 100644 --- a/en/application-dev/application-models/mission-management-overview.md +++ b/en/application-dev/application-models/mission-management-overview.md @@ -30,102 +30,100 @@ Missions are managed by system applications (such as home screen), rather than t A UIAbility instance corresponds to an independent mission. Therefore, when an application calls [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start a UIAbility, a mission is created. - -To call [missionManager](../reference/apis/js-apis-application-missionManager.md) to manage missions, the home screen application must request the **ohos.permission.MANAGE_MISSIONS** permission. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - - -You can use **missionManager** to manage missions, for example, listening for mission changes, obtaining mission information or snapshots, and clearing, locking, or unlocking missions. The sample code is as follows: - - - -```ts -import missionManager from '@ohos.app.ability.missionManager' - -let listener = { - // Listen for mission creation. - onMissionCreated: function (mission) { - console.info("--------onMissionCreated-------") - }, - // Listen for mission destruction. - onMissionDestroyed: function (mission) { - console.info("--------onMissionDestroyed-------") - }, - // Listen for mission snapshot changes. 
- onMissionSnapshotChanged: function (mission) { - console.info("--------onMissionSnapshotChanged-------") - }, - // Listen for switching the mission to the foreground. - onMissionMovedToFront: function (mission) { - console.info("--------onMissionMovedToFront-------") - }, - // Listen for mission icon changes. - onMissionIconUpdated: function (mission, icon) { - console.info("--------onMissionIconUpdated-------") - }, - // Listen for mission name changes. - onMissionLabelUpdated: function (mission) { - console.info("--------onMissionLabelUpdated-------") - }, - // Listen for mission closure events. - onMissionClosed: function (mission) { - console.info("--------onMissionClosed-------") - } -}; - -// 1. Register a mission change listener. -let listenerId = missionManager.on('mission', listener); - -// 2. Obtain the latest 20 missions in the system. -missionManager.getMissionInfos("", 20, (error, missions) => { - console.info("getMissionInfos is called, error.code = " + error.code); - console.info("size = " + missions.length); - console.info("missions = " + JSON.stringify(missions)); -}); - -// 3. Obtain the detailed information about a mission. -let missionId = 11; // The mission ID 11 is only an example. -let mission = missionManager.getMissionInfo("", missionId).catch(function (err) { - console.info(err); -}); - -// 4. Obtain the mission snapshot. -missionManager.getMissionSnapShot("", missionId, (error, snapshot) => { - console.info("getMissionSnapShot is called, error.code = " + error.code); - console.info("bundleName = " + snapshot.ability.bundleName); -}) - -// 5. Obtain the low-resolution mission snapshot. -missionManager.getLowResolutionMissionSnapShot("", missionId, (error, snapshot) => { - console.info("getLowResolutionMissionSnapShot is called, error.code = " + error.code); - console.info("bundleName = " + snapshot.ability.bundleName); -}) - -// 6. Lock or unlock the mission. 
-missionManager.lockMission(missionId).then(() => { - console.info("lockMission is called "); -}); - -missionManager.unlockMission(missionId).then(() => { - console.info("unlockMission is called "); -}); - -// 7. Switch the mission to the foreground. -missionManager.moveMissionToFront(missionId).then(() => { - console.info("moveMissionToFront is called "); -}); - -// 8. Clear a single mission. -missionManager.clearMission(missionId).then(() => { - console.info("clearMission is called "); -}); - -// 9. Clear all missions. -missionManager.clearAllMissions().catch(function (err) { - console.info(err); -}); - -// 10. Deregister the mission change listener. -missionManager.off('mission', listenerId, (error) => { - console.info("unregisterMissionListener"); -}) -``` +To call [missionManager](../reference/apis/js-apis-application-missionManager.md) to manage missions, the home screen application must request the **ohos.permission.MANAGE_MISSIONS** permission. For details about the configuration, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + +You can use **missionManager** to manage missions, for example, listening for mission changes, obtaining mission information or snapshots, and clearing, locking, or unlocking missions. + + ```ts + import missionManager from '@ohos.app.ability.missionManager' + + let listener = { + // Listen for mission creation. + onMissionCreated: function (mission) { + console.info("--------onMissionCreated-------") + }, + // Listen for mission destruction. + onMissionDestroyed: function (mission) { + console.info("--------onMissionDestroyed-------") + }, + // Listen for mission snapshot changes. + onMissionSnapshotChanged: function (mission) { + console.info("--------onMissionSnapshotChanged-------") + }, + // Listen for switching the mission to the foreground. 
+ onMissionMovedToFront: function (mission) { + console.info("--------onMissionMovedToFront-------") + }, + // Listen for mission icon changes. + onMissionIconUpdated: function (mission, icon) { + console.info("--------onMissionIconUpdated-------") + }, + // Listen for mission name changes. + onMissionLabelUpdated: function (mission) { + console.info("--------onMissionLabelUpdated-------") + }, + // Listen for mission closure events. + onMissionClosed: function (mission) { + console.info("--------onMissionClosed-------") + } + }; + + // 1. Register a mission change listener. + let listenerId = missionManager.on('mission', listener); + + // 2. Obtain the latest 20 missions in the system. + missionManager.getMissionInfos("", 20, (error, missions) => { + console.info("getMissionInfos is called, error.code = " + error.code); + console.info("size = " + missions.length); + console.info("missions = " + JSON.stringify(missions)); + }); + + // 3. Obtain the detailed information about a mission. + let missionId = 11; // The mission ID 11 is only an example. + let mission = missionManager.getMissionInfo("", missionId).catch(function (err) { + console.info(err); + }); + + // 4. Obtain the mission snapshot. + missionManager.getMissionSnapShot("", missionId, (error, snapshot) => { + console.info("getMissionSnapShot is called, error.code = " + error.code); + console.info("bundleName = " + snapshot.ability.bundleName); + }) + + // 5. Obtain the low-resolution mission snapshot. + missionManager.getLowResolutionMissionSnapShot("", missionId, (error, snapshot) => { + console.info("getLowResolutionMissionSnapShot is called, error.code = " + error.code); + console.info("bundleName = " + snapshot.ability.bundleName); + }) + + // 6. Lock or unlock the mission. + missionManager.lockMission(missionId).then(() => { + console.info("lockMission is called "); + }); + + missionManager.unlockMission(missionId).then(() => { + console.info("unlockMission is called "); + }); + + // 7. 
Switch the mission to the foreground. + missionManager.moveMissionToFront(missionId).then(() => { + console.info("moveMissionToFront is called "); + }); + + // 8. Clear a single mission. + missionManager.clearMission(missionId).then(() => { + console.info("clearMission is called "); + }); + + // 9. Clear all missions. + missionManager.clearAllMissions().catch(function (err) { + console.info(err); + }); + + // 10. Deregister the mission change listener. + missionManager.off('mission', listenerId, (error) => { + console.info("unregisterMissionListener"); + }) + ``` + + diff --git a/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md b/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md index 9fdc03477c0552f523a0ee9c40c6fa5b8d7c4363..c98d39ff8348f330d58138db89afcc2a0d5995ca 100644 --- a/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md +++ b/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md @@ -8,7 +8,7 @@ Figure 1 Mission snapshot of a UIAbility ![](figures/mission-list-recent.png) -You can also use [UIAbilityContext.setMissionIcon()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionicon) and [UIAbilityContext.setMissionLabel()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionlabel) to customize the icon and name for a mission snapshot. For example, for a UIAbility instance with the launch type set to **standard**, you can configure the icon and name for each mission snapshot based on different functions. +You can also use [UIAbilityContext.setMissionIcon()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionicon) and [UIAbilityContext.setMissionLabel()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionlabel) to customize the icon and name for a mission snapshot. 
For example, for a UIAbility instance in multiton mode, you can configure the icon and name for each mission snapshot based on different functions. This document describes the following operations: @@ -48,4 +48,4 @@ The display effect is shown below. Figure 3 Mission snapshot name -![](figures/mission-set-task-snapshot-label.png) \ No newline at end of file +![](figures/mission-set-task-snapshot-label.png) diff --git a/en/application-dev/application-models/module-switch.md b/en/application-dev/application-models/module-switch.md index a6e532e94827198880cb772c174725b2a89c469b..9f31f892cda1a077301773a129f4f7915cd25c82 100644 --- a/en/application-dev/application-models/module-switch.md +++ b/en/application-dev/application-models/module-switch.md @@ -3,7 +3,7 @@ When switching an application from the FA model to the stage model, you must migrate the configurations under the **module** tag in the **config.json** file to the **module** tag in the **module.json5** file. -### **Table 1** module comparison +### Table 1 module Comparison | Field Name in the FA Model| Field Description| Field Name in the Stage Model| Difference| | -------- | -------- | -------- | -------- | @@ -21,13 +21,13 @@ When switching an application from the FA model to the stage model, you must mig | shortcuts | Shortcuts of the application.| shortcut_config.json| In the stage model, the **shortcut_config.json** file is defined in **resources/base/profile** in the development view.| | reqPermissions | Permissions that the application requests from the system when it is running.| requestPermissions | The field name is changed.| | colorMode | Color mode of the application.| / | This configuration is not supported in the stage model.| -| distroFilter | Distribution rules of the application.| distroFilter_config.json| In the stage model, the **distroFilter_config.json** file is defined in **resources/base/profile** in the development view.| +| distributionFilter | Distribution rules of the application.| 
distroFilter_config.json| In the stage model, the **distroFilter_config.json** file is defined in **resources/base/profile** in the development view.| | reqCapabilities | Device capabilities required for running the application.| / | This configuration is not supported in the stage model.| | commonEvents | Common events.| common_event_config.json| In the stage model, the **common_event_config.json** file is defined in **resources/base/profile** in the development view.| | entryTheme | Keyword of an OpenHarmony internal theme.| / | This configuration is not supported in the stage model.| -### Table 2 metaData comparison +### Table 2 metaData Comparison | Field Name Under metaData in the FA Model| Field Description| Field Name Under metaData in the Stage Model| Difference| | -------- | -------- | -------- | -------- | @@ -35,7 +35,7 @@ When switching an application from the FA model to the stage model, you must mig | results | Metadata of the ability return value.| / | This configuration is not supported in the stage model.| | customizeData | Custom metadata of the parent component. **parameters** and **results** cannot be configured in **application**.| metadata | See [Table 3](#table-3-comparison-between-customizedata-under-metadata-in-the-fa-model-and-metadata-in-the-stage-model).| -### Table 3 Comparison between customizeData under metaData in the FA model and metadata in the stage model +### Table 3 Comparison Between customizeData Under metaData in the FA Model and metadata in the Stage Model | Field Name Under customizeData in metaData in the FA Model| Field Description| Field Name Under metaData in the Stage Model| Difference| | -------- | -------- | -------- | -------- | @@ -44,14 +44,14 @@ When switching an application from the FA model to the stage model, you must mig | extra | Format of the current custom data. The value is the resource value of **extra**.| resource | The field name is changed. 
For details, see [Table 4](#table-4-metadata-examples).| -### Table 4 metaData examples +### Table 4 metaData Examples | Example in the FA Model| Example in the Stage Model| | -------- | -------- | | "metaData": {
"customizeDate": [{
"name": "label",
"value": "string",
"extra": "$string:label",
}]
} | "meteData": [{
"name": "label",
"value": "string",
"resource": "$string:label",
}] | -### Table 5 abilities comparison +### Table 5 abilities Comparison | Field Name Under abilities in the FA Model| Field Description| Field Name Under abilities in the Stage Model| Difference| | -------- | -------- | -------- | -------- | @@ -71,5 +71,5 @@ When switching an application from the FA model to the stage model, you must mig | formsEnabled | Whether the ability can provide widgets.| / | This configuration is not supported in the stage model.| | forms | Information about the widgets used by the ability. This field is valid only when **formsEnabled** is set to **true**.| form_config.json| In the stage model, the **form_config.json** file is defined in **resources/base/profile** in the development view.| | srcLanguage | Programming language used to develop the ability.| / | This configuration is not supported in the stage model.| -| srcPath | Path of the JS component code corresponding to the ability.| srcEntrance | Path of the JS code corresponding to the ability.| +| srcPath | Path of the JS component code corresponding to the ability.| srcEntry | Path of the JS code corresponding to the ability.| | uriPermission | Application data that the ability can access.| / | This configuration is not supported in the stage model.| diff --git a/en/application-dev/application-models/pageability-launch-type.md b/en/application-dev/application-models/pageability-launch-type.md index 5241a7cabefbf3e68e6a3f413b8892ef5f6ff8d3..3b75ff6a60899f19f08aad5235fb3dc49632cb01 100644 --- a/en/application-dev/application-models/pageability-launch-type.md +++ b/en/application-dev/application-models/pageability-launch-type.md @@ -5,10 +5,10 @@ Depending on the launch type, the action performed when the PageAbility starts d **Table 1** PageAbility launch types -| Launch Type| Description| -| -------- | -------- | -| singleton | Each time **startAbility()** is called, if an ability instance of this type already exists in the application process, the instance is reused. 
There is only one ability instance of this type in **Recents**.
A typical scenario is as follows: When a user opens a video playback application and watches a video, returns to the home screen, and opens the video playback application again, the video that the user watched before returning to the home screen is still played.| -| standard | Default type. Each time **startAbility()** is called, a new ability instance is created in the application process. Multiple ability instances of this type are displayed in **Recents**.
A typical scenario is as follows: When a user opens a document application and touches **New**, a new document task is created. Multiple new document missions are displayed in **Recents**.| +| Launch Type| Meaning | Description| +| -------- | -------- | -------- | +| singleton | Singleton mode| Each time **startAbility()** is called, if an ability instance of this type already exists in the application process, the instance is reused. There is only one ability instance of this type in **Recents**.
A typical scenario is as follows: When a user opens a video playback application and watches a video, returns to the home screen, and opens the video playback application again, the video that the user watched before returning to the home screen is still played.| +| standard | Multiton mode| Default type. Each time **startAbility()** is called, a new ability instance is created in the application process. Multiple ability instances of this type are displayed in **Recents**.
A typical scenario is as follows: When a user opens a document application and touches **New**, a new document task is created. Multiple new document missions are displayed in **Recents**.| You can set **launchType** in the **config.json** file to configure the launch type. The sample code is as follows: @@ -19,8 +19,8 @@ You can set **launchType** in the **config.json** file to configure the launch t // ... "abilities": [ { - // singleton mode. - // standard mode. + // singleton means the singleton mode. + // standard means the multiton mode. "launchType": "standard", // ... } @@ -30,7 +30,8 @@ You can set **launchType** in the **config.json** file to configure the launch t ``` -When the PageAbility is started for the first time (either in standard or singleton mode), the [PageAbility lifecycle callbacks](pageability-lifecycle.md#table13118194914476) are triggered. When it is not started for the first time in singleton mode, the **onNewWant()** callback (as described in the table below) is triggered, but the **onCreate()** callback is not. +When the PageAbility is started in multiton mode or it is started in singleton mode for the first time, the [PageAbility lifecycle callbacks](pageability-lifecycle.md#table13118194914476) are triggered. When it is not started for the first time in singleton mode, the **onNewWant()** callback (as described in the table below) is triggered, but the **onCreate()** callback is not. + **Table 2** Callbacks specific to the singleton mode diff --git a/en/application-dev/application-models/process-model-stage.md b/en/application-dev/application-models/process-model-stage.md index bbfa0602aecb127c5e484f0ebbdcb166f81310f7..03da480722de124a1ede58da52e74cd48c5f23f0 100644 --- a/en/application-dev/application-models/process-model-stage.md +++ b/en/application-dev/application-models/process-model-stage.md @@ -15,7 +15,8 @@ The OpenHarmony process model is shown below. 
> NOTE > -> You can create ServiceExtensionAbility and DataShareExtensionAbility only for system applications. +> - You can create ServiceExtensionAbility and DataShareExtensionAbility only for system applications. +> - To view information about all running processes, run the **hdc shell** command to enter the shell CLI of the device, and run the **ps -ef** command. A system application can apply for multi-process permissions (as shown in the following figure) and configure a custom process for an HAP. UIAbility, DataShareExtensionAbility, and ServiceExtensionAbility in the HAP run in the custom process. Different HAPs run in different processes by configuring different process names. diff --git a/en/application-dev/application-models/redirection-rules.md b/en/application-dev/application-models/redirection-rules.md index d7456653640942bca333a28f7f6d5262ec4d63f3..4e9f65a8b3439fe4dde4761fbcb3d341151ba4f3 100644 --- a/en/application-dev/application-models/redirection-rules.md +++ b/en/application-dev/application-models/redirection-rules.md @@ -33,4 +33,4 @@ To enable an ability to be called by any application, configure the **config.jso ``` -If the ability contains **skills**, you are advised to set **visible** to **true** so that the ability can be [implicitly started](explicit-implicit-want-mappings.md#matching-rules-of-implicit-want) by other applications. If this attribute is set to **false**, the system returns **PERMISSION_DENIED** when other applications attempt to start the ability. In this case, a system application can request the [START_INVISIBLE_ABILITY](../security/permission-list.md) permission to start the ability. Example abilities with **visible** set to **false** are home screen, voice assistant, or search assistant. +If the ability contains **skills**, you are advised to set **visible** to **true** so that the ability can be [implicitly started](explicit-implicit-want-mappings.md) by other applications. 
If this attribute is set to **false**, the system returns **PERMISSION_DENIED** when other applications attempt to start the ability. In this case, a system application can request the [START_INVISIBLE_ABILITY](../security/permission-list.md) permission to start the ability. Example abilities with **visible** set to **false** are home screen, voice assistant, or search assistant. diff --git a/en/application-dev/application-models/request-permissions.md b/en/application-dev/application-models/request-permissions.md index 670860d87dbb56adceb02f4ca350c24b61260d30..a29b793c9949d1080c8322681ba27cf6495f29ac 100644 --- a/en/application-dev/application-models/request-permissions.md +++ b/en/application-dev/application-models/request-permissions.md @@ -9,37 +9,7 @@ During application development, you must declare the required permission in the To declare a permission in **config.json**, add **reqPermissions** under **module** and list the permission. +For example, to request the permission to access the calendar, perform the following steps: -For example, to declare the permission to access the calendar, request the **ohos.permission.READ_CALENDAR** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - - -The sample code in the **config.json** file is as follows: - -```json -{ - "module": { - // ... - "reqPermissions": [ - { - "name": "ohos.permission.READ_CALENDAR" - // ... 
- } - ] - } -} -``` - - -Request the permission from uses in the form of a dialog box: - -```ts -import featureAbility from '@ohos.ability.featureAbility'; - -let context = featureAbility.getContext(); -let permissions: Array = ['ohos.permission.READ_CALENDAR'] -context.requestPermissionsFromUser(permissions, 1).then((data) => { - console.info("Succeed to request permission from user with data: " + JSON.stringify(data)) -}).catch((error) => { - console.info("Failed to request permission from user with error: " + JSON.stringify(error)) -}) -``` +1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). +2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). diff --git a/en/application-dev/application-models/service-widget-overview.md b/en/application-dev/application-models/service-widget-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..3739129f2a07765b2ebe015910d1d6e3d8d721d0 --- /dev/null +++ b/en/application-dev/application-models/service-widget-overview.md @@ -0,0 +1,58 @@ +# Service Widget Overview + + +A service widget (also called widget) is a set of UI components that display important information or operations specific to an application. It provides users with direct access to a desired application service, without the need to open the application first. A widget usually appears as a part of the UI of another application (which currently can only be a system application, such as the home screen) and provides basic interactive features such as opening a UI page or sending a message. 
+ + +## Service Widget Architecture + + **Figure 1** Service widget architecture + +![WidgetArchitecture](figures/WidgetArchitecture.png) + +Before you get started, it would be helpful if you have a basic understanding of the following concepts: + +- Widget host: an application that displays the widget content and controls the widget location. An example is the home screen in the preceding figure. + + - Application icon: an application entry icon, clicking which starts the application process. The icon content does not support interactions. + - Widget: an interactive UI in various sizes. It may provide buttons to implement different functions, such as the button to [update the widget content](arkts-ui-widget-event-formextensionability.md) or [switch to an application](arkts-ui-widget-event-router.md). + +- Card provider: an application that provides service widget content to be displayed. It controls the display content, display logic, and component click events triggered on a service widget. + + - FormExtensionAbility: widget service logic module, which provides lifecycle callbacks invoked when a widget is created, destroyed, or updated. + - Widget page: widget UI module, which contains display and interaction information such as components, layouts, and events. + +Below is the typical procedure of using the widget: + + **Figure 2** Typical procedure of using the widget + +![WidgetUse](figures/WidgetUse.png) + +1. Touch and hold an application icon on the home screen to display the shortcut menu. + +2. Touch **Service widget** to access the preview screen. + +3. Touch the **Add to home** button. The widget is then added to the home screen. + + +## Widget UI Development Mode + +In the stage model, the UI of a widget can be developed in [ArkTS](arkts-ui-widget-working-principles.md) or [JS](js-ui-widget-development.md). + +- A widget developed in the ArkTS-based declarative development paradigm is called ArkTS widget. 
+ +- A widget developed in the JS-compatible web-like development paradigm is called JS widget. + +ArkTS widgets and JS widgets have different implementation principles and features. The following table lists the differences in capabilities. + +| Category| JS widget| ArkTS widget| +| -------- | -------- | -------- | +| Development paradigm| Web-like paradigm| Declarative paradigm| +| Component capability| Supported| Supported| +| Layout capability| Supported| Supported| +| Event capability| Supported| Supported| +| Custom animation| Not supported| Supported| +| Custom drawing| Not supported| Supported| +| Logic code execution (excluding the import capability)| Not supported| Supported| + +As can be seen above, ArkTS widgets have more capabilities and use cases than JS widgets. Therefore, ArkTS widgets are always recommended, except for the case where the widget consists of only static pages. diff --git a/en/application-dev/application-models/serviceextensionability.md b/en/application-dev/application-models/serviceextensionability.md index 9ff7a0ae5d6df7574da19565c81411236dba3dda..2e9aaeb48100d86d0cd1c7a0e69ea01bf4ef2340 100644 --- a/en/application-dev/application-models/serviceextensionability.md +++ b/en/application-dev/application-models/serviceextensionability.md @@ -1,125 +1,151 @@ # ServiceExtensionAbility +## Overview -[ServiceExtensionAbility](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md) is an ExtensionAbility component of the service type that provides extension capabilities related to background services. +[ServiceExtensionAbility](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md) is an ExtensionAbility component of the SERVICE type that provides capabilities related to background services. It holds an internal [ServiceExtensionContext](../reference/apis/js-apis-inner-application-serviceExtensionContext.md), through which a variety of APIs are provided for external systems. 
+In this document, the started ServiceExtensionAbility component is called the server, and the component that starts ServiceExtensionAbility is called the client. -ServiceExtensionAbility can be started or connected by other application components to process transactions in the background based on the request of the caller. System applications can call the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartserviceextensionability) method to start background services or call the [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextconnectserviceextensionability) method to connect to background services. Third-party applications can call only **connectServiceExtensionAbility()** to connect to background services. The differences between starting and connecting to background services are as follows: +A ServiceExtensionAbility can be started or connected by other components to process transactions in the background based on the request of the caller. System applications can call the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartserviceextensionability) method to start background services or call the [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextconnectserviceextensionability) method to connect to background services. Third-party applications can call only **connectServiceExtensionAbility()** to connect to background services. The differences between starting and connecting to background services are as follows: +- **Starting**: In the case that AbilityA starts ServiceB, they are weakly associated. After AbilityA exits, ServiceB can still exist. -- In the case that AbilityA starts ServiceB, they are weakly associated. After AbilityA exits, ServiceB can still exist. 
+- **Connecting**: In the case that AbilityA connects to ServiceB, they are strongly associated. After AbilityA exits, ServiceB also exits. -- In the case that AbilityA connects to ServiceB, they are strongly associated. After AbilityA exits, ServiceB also exits. +Note the following: +- If a ServiceExtensionAbility is started only by means of connecting, its lifecycle is controlled by the client. A new connection is set up each time the client calls the [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextconnectserviceextensionability) method. When the client exits or calls the [disconnectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextdisconnectserviceextensionability) method, the connection is disconnected. After all connections are disconnected, the ServiceExtensionAbility automatically exits. -Each type of ExtensionAbility has its own context. ServiceExtensionAbility has [ServiceExtensionContext](../reference/apis/js-apis-inner-application-serviceExtensionContext.md). In this document, the started ServiceExtensionAbility component is called the server, and the component that starts ServiceExtensionAbility is called the client. - - -This topic describes how to use ServiceExtensionAbility in the following scenarios: - - -- [Implementing a Background Service (for System Applications Only)](#implementing-a-background-service-for-system-applications-only) - -- [Starting a Background Service (for System Applications Only)](#starting-a-background-service-for-system-applications-only) - -- [Connecting to a Background Service](#connecting-to-a-background-service) - +- Once a ServiceExtensionAbility is started by means of starting, it will not exit automatically. 
System applications can call the [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstopserviceextensionability) method to stop it. > **NOTE** -> - OpenHarmony does not support third-party applications in implementing ServiceExtensionAbility. If you want to implement transaction processing in the background, use background tasks. For details, see [Background Task](../task-management/background-task-overview.md). -> +> +> - Currently, third-party applications cannot implement ServiceExtensionAbility. If you want to implement transaction processing in the background, use background tasks. For details, see [Background Task](../task-management/background-task-overview.md). > - UIAbility of a third-party application can connect to ServiceExtensionAbility provided by the system through the context. -> > - Third-party applications can connect to ServiceExtensionAbility provided by the system only when they gain focus in the foreground. - -## Implementing a Background Service (for System Applications Only) +## Lifecycle [ServiceExtensionAbility](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md) provides the callbacks **onCreate()**, **onRequest()**, **onConnect()**, **onDisconnect()**, and **onDestory()**. Override them as required. The following figure shows the lifecycle of ServiceExtensionAbility. - **Figure 1** ServiceExtensionAbility lifecycle +**Figure 1** ServiceExtensionAbility lifecycle ![ServiceExtensionAbility-lifecycle](figures/ServiceExtensionAbility-lifecycle.png) - **onCreate** - - This callback is triggered when a service is created for the first time. You can perform initialization operations, for example, registering a common event listener. + + This callback is triggered when a ServiceExtensionAbility is created for the first time. You can perform initialization operations, for example, registering a common event listener. 
> **NOTE** > - > If a service has been created, starting it again does not trigger the **onCreate()** callback. + > If a ServiceExtensionAbility has been created, starting it again does not trigger the **onCreate()** callback. - **onRequest** - - This callback is triggered when another component calls the **startServiceExtensionAbility()** method to start the service. After being started, the service runs in the background. + + This callback is triggered when another component calls the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartserviceextensionability) method to start a ServiceExtensionAbility. After being started, the ServiceExtensionAbility runs in the background. This callback is triggered each time the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartserviceextensionability) method is called. - **onConnect** - - This callback is triggered when another component calls the **connectServiceExtensionAbility()** method to connect to the service. In this method, a remote proxy object (IRemoteObject) is returned, through which the client communicates with the server by means of RPC. + + This callback is triggered when another component calls the [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextconnectserviceextensionability) method to connect to a ServiceExtensionAbility. In this method, a remote proxy object (IRemoteObject) is returned, through which the client communicates with the server by means of RPC. At the same time, the system stores the remote proxy object (IRemoteObject). 
If another component calls the [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextconnectserviceextensionability) method to connect to this ServiceExtensionAbility, the system directly returns the saved remote proxy object (IRemoteObject) and does not trigger the callback. - **onDisconnect** - - This callback is triggered when a component calls the **disconnectServiceExtensionAbility()** method to disconnect from the service. + + This callback is triggered when the last connection is disconnected. A connection is disconnected when the client exits or the [disconnectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextdisconnectserviceextensionability) method is called. - **onDestroy** - This callback is triggered when the service is no longer used and the instance is ready for destruction. You can clear resources in this callback, for example, deregister the listener. + This callback is triggered when ServiceExtensionAbility is no longer used and the instance is ready for destruction. You can clear resources in this callback, for example, deregister the listener. +## Implementing a Background Service (for System Applications Only) -## How to Develop +### Preparations -To implement a background service, manually create a ServiceExtensionAbility component in DevEco Studio. The procedure is as follows: +Only system applications can implement ServiceExtensionAbility. You must make the following preparations before development: -1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **serviceextability**. +- **Switching to the full SDK**: All APIs related to ServiceExtensionAbility are marked as system APIs and hidden by default. Therefore, you must manually obtain the full SDK from the mirror and switch to it in DevEco Studio. 
For details, see [Guide to Switching to Full SDK](../quick-start/full-sdk-switch-guide.md). -2. In the **serviceextability** directory, right-click and choose **New > TypeScript File** to create a file named **ServiceExtAbility.ts**. +- **Requesting the AllowAppUsePrivilegeExtension privilege**: Only applications with the **AllowAppUsePrivilegeExtension** privilege can develop ServiceExtensionAbility. For details about how to request the privilege, see [Application Privilege Configuration Guide](../../device-dev/subsystems/subsys-app-privilege-config-guide.md). -3. Open the **ServiceExtAbility.ts** file, import the [RPC module](../reference/apis/js-apis-rpc.md), and reload the **onRemoteMessageRequest()** method to receive messages from the client and return the processing result to the client. **REQUEST_VALUE** is used to verify the service request code sent by the client. - - ```ts - import rpc from '@ohos.rpc'; - - const REQUEST_CODE = 99; - - class StubTest extends rpc.RemoteObject { - constructor(des) { - super(des); - } - - // Receive a message from the client and return the processing result to the client. - onRemoteMessageRequest(code, data, reply, option) { - if (code === REQUEST_CODE) { - // Receive data from the client. - // If the client calls data.writeInt() multiple times to write multiple pieces of data, the server can call data.readInt() multiple times to receive all the data. - let optFir = data.readInt(); - let optSec = data.readInt(); - // The server returns the data processing result to the client. - // In the example, the server receives two pieces of data and returns the sum of the two pieces of data to the client. - reply.writeInt(optFir + optSec); - } - return true; - } - - // Send messages to the client in synchronous or asynchronous mode. 
- sendRequest(code, data, reply, options) { - return null; - } - } - ``` +### Defining IDL Interfaces + +As a background service, ServiceExtensionAbility needs to provide interfaces that can be called by external systems. You can define the interfaces in IDL files and use the [IDL tool](../IDL/idl-guidelines.md) to generate proxy and stub files. The following demonstrates how to define a file named **IIdlServiceExt.idl**: + +```cpp +interface OHOS.IIdlServiceExt { + int ProcessData([in] int data); + void InsertDataToMap([in] String key, [in] int val); +} +``` + +Create the **IdlServiceExt** directory in the **ets** directory corresponding to the module of the DevEco Studio project, and copy the files generated by the [IDL tool](../IDL/idl-guidelines.md) to this directory. Then create a file named **idl_service_ext_impl.ts** to implement the IDL interfaces. + +``` +├── ets +│ ├── IdlServiceExt +│ │ ├── i_idl_service_ext.ts # File generated. +│ │ ├── idl_service_ext_proxy.ts # File generated. +│ │ ├── idl_service_ext_stub.ts # File generated. +│ │ ├── idl_service_ext_impl.ts # Custom file used to implement IDL interfaces. +│ └ +└ +``` + +An example of **idl_service_ext_impl.ts** is as follows: + +```ts +import {processDataCallback} from './i_idl_service_ext'; +import {insertDataToMapCallback} from './i_idl_service_ext'; +import IdlServiceExtStub from './idl_service_ext_stub'; + +const ERR_OK = 0; +const TAG: string = "[IdlServiceExtImpl]"; + +// You need to implement interfaces in this type. +export default class ServiceExtImpl extends IdlServiceExtStub { + processData(data: number, callback: processDataCallback): void { + // Implement service logic. + console.info(TAG, `processData: ${data}`); + callback(ERR_OK, data + 1); + } + + insertDataToMap(key: string, val: number, callback: insertDataToMapCallback): void { + // Implement service logic. 
+ console.log(TAG, `insertDataToMap, key: ${key} val: ${val}`); + callback(ERR_OK); + } +} +``` + +### Creating a ServiceExtensionAbility + +To manually create a ServiceExtensionAbility in the DevEco Studio project, perform the following steps: + +1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **ServiceExtAbility**. + +2. In the **ServiceExtAbility** directory, right-click and choose **New > TypeScript File** to create a file named **ServiceExtAbility.ts**. + + ``` + ├── ets + │ ├── IdlServiceExt + │ │ ├── i_idl_service_ext.ts # File generated. + │ │ ├── idl_service_ext_proxy.ts # File generated. + │ │ ├── idl_service_ext_stub.ts # File generated. + │ │ ├── idl_service_ext_impl.ts # Custom file used to implement IDL interfaces. + │ ├── ServiceExtAbility + │ │ ├── ServiceExtAbility.ts + └ + ``` + +3. In the **ServiceExtAbility.ts** file, add the dependency package for importing ServiceExtensionAbility. Customize a class that inherits from ServiceExtensionAbility and implement the lifecycle callbacks, and return the previously defined **ServiceExtImpl** object in the **onConnect** lifecycle callback. -4. In the **ServiceExtAbility.ts** file, add the dependency package for importing ServiceExtensionAbility. Customize a class that inherits from ServiceExtensionAbility and add the required lifecycle callbacks. - ```ts import ServiceExtensionAbility from '@ohos.app.ability.ServiceExtensionAbility'; - import rpc from '@ohos.rpc'; - - const TAG: string = "[Example].[Entry].[ServiceExtAbility]"; - const REQUEST_CODE = 99; + import ServiceExtImpl from '../IdlServiceExt/idl_service_ext_impl'; - class StubTest extends rpc.RemoteObject { - // ... 
- } + const TAG: string = "[ServiceExtAbility]"; export default class ServiceExtAbility extends ServiceExtensionAbility { + serviceExtImpl = new ServiceExtImpl("ExtImpl"); + onCreate(want) { console.info(TAG, `onCreate, want: ${want.abilityName}`); } @@ -130,7 +156,8 @@ To implement a background service, manually create a ServiceExtensionAbility com onConnect(want) { console.info(TAG, `onConnect, want: ${want.abilityName}`); - return new StubTest("test"); + // Return the ServiceExtImpl object, through which the client can communicate with the ServiceExtensionAbility. + return this.serviceExtImpl; } onDisconnect(want) { @@ -143,8 +170,8 @@ To implement a background service, manually create a ServiceExtensionAbility com } ``` -5. Register ServiceExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the **Module** project. Set **type** to **"service"** and **srcEntrance** to the code path of the ExtensionAbility component. - +4. Register ServiceExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the **Module** project. Set **type** to **"service"** and **srcEntry** to the code path of the ServiceExtensionAbility component. + ```json { "module": { @@ -155,15 +182,14 @@ To implement a background service, manually create a ServiceExtensionAbility com "icon": "$media:icon", "description": "service", "type": "service", - "visible": true, - "srcEntrance": "./ets/serviceextability/ServiceExtAbility.ts" + "exported": true, + "srcEntry": "./ets/ServiceExtAbility/ServiceExtAbility.ts" } ] } } ``` - ## Starting a Background Service (for System Applications Only) A system application uses the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartserviceextensionability) method to start a background service. 
The [onRequest()](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md#serviceextensionabilityonrequest) callback is invoked, and the **Want** object passed by the caller is received through the callback. After the background service is started, its lifecycle is independent of that of the client. In other words, even if the client is destroyed, the background service can still run. Therefore, the background service must be stopped by calling [terminateSelf()](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextterminateself) when its work is complete. Alternatively, another component can call [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstopserviceextensionability) to stop the background service. @@ -173,7 +199,7 @@ A system application uses the [startServiceExtensionAbility()](../reference/apis > [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartserviceextensionability), [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstopserviceextensionability), and [terminateSelf()](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextterminateself) of ServiceExtensionContext are system APIs and cannot be called by third-party applications. 1. Start a new ServiceExtensionAbility in a system application. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). - + ```ts let want = { "deviceId": "", @@ -188,7 +214,7 @@ A system application uses the [startServiceExtensionAbility()](../reference/apis ``` 2. Stop ServiceExtensionAbility in the system application. - + ```ts let want = { "deviceId": "", @@ -203,7 +229,7 @@ A system application uses the [startServiceExtensionAbility()](../reference/apis ``` 3. 
ServiceExtensionAbility stops itself. - + ```ts // this is the current ServiceExtensionAbility component. this.context.terminateSelf().then(() => { @@ -213,35 +239,71 @@ A system application uses the [startServiceExtensionAbility()](../reference/apis }) ``` - > **NOTE** > > Background services can run in the background for a long time. To minimize resource usage, destroy it in time when a background service finishes the task of the requester. A background service can be stopped in either of the following ways: > > - The background service calls the [terminateSelf()](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextterminateself) method to automatically stop itself. -> > - Another component calls the [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstopserviceextensionability) method to stop the background service. -> > After either method is called, the system destroys the background service. - ## Connecting to a Background Service -Either a system application or a third-party application can connect to a service (service specified in the **Want** object) through [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextconnectserviceextensionability). The [onConnect()](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md#serviceextensionabilityonconnect) callback is invoked, and the **Want** object passed by the caller is received through the callback. In this way, a persistent connection is established. +Either a system application or a third-party application can connect to a ServiceExtensionAbility (specified in the **Want** object) through [connectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextconnectserviceextensionability). 
The [onConnect()](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md#serviceextensionabilityonconnect) callback is invoked, and the **Want** object passed by the caller is received through the callback. In this way, a persistent connection is established. The ServiceExtensionAbility component returns an IRemoteObject in the **onConnect()** callback. Through this IRemoteObject, you can define communication interfaces for RPC interaction between the client and server. Multiple clients can connect to the same background service at the same time. After a client finishes the interaction, it must call [disconnectServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextdisconnectserviceextensionability) to disconnect from the service. If all clients connected to a background service are disconnected, the system destroys the service. - Call **connectServiceExtensionAbility()** to establish a connection to a background service. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). ```ts - import rpc from '@ohos.rpc'; - - const REQUEST_CODE = 99; let want = { "deviceId": "", "bundleName": "com.example.myapplication", "abilityName": "ServiceExtAbility" }; + let options = { + onConnect(elementName, remote) { + /* The input parameter remote is the object returned by ServiceExtensionAbility in the onConnect lifecycle callback. + * This object is used for communication with ServiceExtensionAbility. For details, see the section below. + */ + console.info('onConnect callback'); + if (remote === null) { + console.info(`onConnect remote is null`); + return; + } + }, + onDisconnect(elementName) { + console.info('onDisconnect callback') + }, + onFailed(code) { + console.info('onFailed callback') + } + } + // The ID returned after the connection is set up must be saved. The ID will be passed for service disconnection. 
+ let connectionId = this.context.connectServiceExtensionAbility(want, options); + ``` + +- Use **disconnectServiceExtensionAbility()** to disconnect from the background service. + + ```ts + // connectionId is returned when connectServiceExtensionAbility is called and needs to be manually maintained. + this.context.disconnectServiceExtensionAbility(connectionId).then((data) => { + console.info('disconnectServiceExtensionAbility success'); + }).catch((error) => { + console.error('disconnectServiceExtensionAbility failed'); + }) + ``` + +## Communication Between the Client and Server + +After obtaining the [rpc.RemoteObject](../reference/apis/js-apis-rpc.md#iremoteobject) object from the **onConnect()** callback, the client can communicate with ServiceExtensionAbility in either of the following ways: + +- Using the IDL interface provided by the server for communication (recommended) + + ```ts + // The client needs to import idl_service_ext_proxy.ts provided by the server for external systems to the local project. + import IdlServiceExtProxy from '../IdlServiceExt/idl_service_ext_proxy'; + let options = { onConnect(elementName, remote) { console.info('onConnect callback'); @@ -249,23 +311,54 @@ The ServiceExtensionAbility component returns an IRemoteObject in the **onConnec console.info(`onConnect remote is null`); return; } + let serviceExtProxy = new IdlServiceExtProxy(remote); + // Communication is carried out by interface calling, without exposing RPC details. 
+ serviceExtProxy.processData(1, (errorCode, retVal) => { + console.log(`processData, errorCode: ${errorCode}, retVal: ${retVal}`); + }); + serviceExtProxy.insertDataToMap('theKey', 1, (errorCode) => { + console.log(`insertDataToMap, errorCode: ${errorCode}`); + }) + }, + onDisconnect(elementName) { + console.info('onDisconnect callback') + }, + onFailed(code) { + console.info('onFailed callback') + } + } + ``` + +- Calling [sendMessageRequest](../reference/apis/js-apis-rpc.md#sendmessagerequest9) to send messages to the server (not recommended) + + ```ts + import rpc from '@ohos.rpc'; + + const REQUEST_CODE = 1; + let options = { + onConnect(elementName, remote) { + console.info('onConnect callback'); + if (remote === null) { + console.info(`onConnect remote is null`); + return; + } + // Directly call the RPC interface to send messages to the server. The client needs to serialize the input parameters and deserialize the return values. The process is complex. let option = new rpc.MessageOption(); - let data = new rpc.MessageParcel(); - let reply = new rpc.MessageParcel(); + let data = new rpc.MessageSequence(); + let reply = new rpc.MessageSequence(); data.writeInt(100); - data.writeInt(200); - + // @param code Indicates the service request code sent by the client. - // @param data Indicates the {@link MessageParcel} object sent by the client. + // @param data Indicates the {@link MessageSequence} object sent by the client. // @param reply Indicates the response message object sent by the remote service. // @param options Specifies whether the operation is synchronous or asynchronous. // // @return Returns {@code true} if the operation is successful; returns {@code false} otherwise. 
- remote.sendRequest(REQUEST_CODE, data, reply, option).then((ret) => { + remote.sendMessageRequest(REQUEST_CODE, data, reply, option).then((ret) => { let msg = reply.readInt(); - console.info(`sendRequest ret:${ret} msg:${msg}`); + console.info(`sendMessageRequest ret:${ret} msg:${msg}`); }).catch((error) => { - console.info('sendRequest failed'); + console.info('sendMessageRequest failed'); }); }, onDisconnect(elementName) { @@ -275,18 +368,90 @@ The ServiceExtensionAbility component returns an IRemoteObject in the **onConnec console.info('onFailed callback') } } - // The ID returned after the connection is set up must be saved. The ID will be passed for service disconnection. - let connectionId = this.context.connectServiceExtensionAbility(want, options); ``` -- Use **disconnectServiceExtensionAbility()** to disconnect from the background service. - +## Client Identity Verification by the Server + +When ServiceExtensionAbility is used to provide sensitive services, the client identity must be verified. You can implement the verification on the stub of the IDL interface. For details about the IDL interface implementation, see [Defining IDL Interfaces](#defining-idl-interfaces). Two verification modes are recommended: + +- **Verifying the client identity based on callerUid** + + Call the [getCallingUid()](../reference/apis/js-apis-rpc.md#getcallinguid) method to obtain the UID of the client, and then call the [getBundleNameByUid()](../reference/apis/js-apis-bundleManager.md#bundlemanagergetbundlenamebyuid) method to obtain the corresponding bundle name. In this way, the client identity is verified. Note that [getBundleNameByUid()](../reference/apis/js-apis-bundleManager.md#bundlemanagergetbundlenamebyuid) is asynchronous, and therefore the server cannot return the verification result to the client. This verification mode applies when the client sends an asynchronous task execution request to the server.
The sample code is as follows: + ```ts - let connectionId = 1 // ID returned when the service is connected through connectServiceExtensionAbility. - this.context.disconnectServiceExtensionAbility(connectionId).then((data) => { - console.info('disconnectServiceExtensionAbility success'); - }).catch((error) => { - console.error('disconnectServiceExtensionAbility failed'); - }) + import rpc from '@ohos.rpc'; + import bundleManager from '@ohos.bundle.bundleManager'; + import {processDataCallback} from './i_idl_service_ext'; + import {insertDataToMapCallback} from './i_idl_service_ext'; + import IdlServiceExtStub from './idl_service_ext_stub'; + + const ERR_OK = 0; + const ERR_DENY = -1; + const TAG: string = "[IdlServiceExtImpl]"; + + export default class ServiceExtImpl extends IdlServiceExtStub { + processData(data: number, callback: processDataCallback): void { + console.info(TAG, `processData: ${data}`); + + let callerUid = rpc.IPCSkeleton.getCallingUid(); + bundleManager.getBundleNameByUid(callerUid).then((callerBundleName) => { + console.info(TAG, 'getBundleNameByUid: ' + callerBundleName); + // Identify the bundle name of the client. + if (callerBundleName != 'com.example.connectextapp') { // The verification fails. + console.info(TAG, 'The caller bundle is not in whitelist, reject'); + return; + } + // The verification is successful, and service logic is executed normally. + }).catch(err => { + console.info(TAG, 'getBundleNameByUid failed: ' + err.message); + }); + } + + insertDataToMap(key: string, val: number, callback: insertDataToMapCallback): void { + // Implement service logic. 
+ console.log(TAG, `insertDataToMap, key: ${key} val: ${val}`); + callback(ERR_OK); + } + } + ``` + +- **Verifying the client identity based on callerTokenId** + + Call the [getCallingTokenId()](../reference/apis/js-apis-rpc.md#getcallingtokenid) method to obtain the token ID of the client, and then call the [verifyAccessTokenSync()](../reference/apis/js-apis-abilityAccessCtrl.md#verifyaccesstokensync) method to check whether the client has a specific permission. Currently, OpenHarmony does not support permission customization. Therefore, only [system-defined permissions](../security/permission-list.md) can be verified. The sample code is as follows: + + ```ts + import rpc from '@ohos.rpc'; + import abilityAccessCtrl from '@ohos.abilityAccessCtrl'; + import {processDataCallback} from './i_idl_service_ext'; + import {insertDataToMapCallback} from './i_idl_service_ext'; + import IdlServiceExtStub from './idl_service_ext_stub'; + + const ERR_OK = 0; + const ERR_DENY = -1; + const TAG: string = "[IdlServiceExtImpl]"; + + export default class ServiceExtImpl extends IdlServiceExtStub { + processData(data: number, callback: processDataCallback): void { + console.info(TAG, `processData: ${data}`); + + let callerTokenId = rpc.IPCSkeleton.getCallingTokenId(); + let accessManger = abilityAccessCtrl.createAtManager(); + // The permission to be verified varies depending on the service requirements. ohos.permission.SET_WALLPAPER is only an example. + let grantStatus = + accessManger.verifyAccessTokenSync(callerTokenId, "ohos.permission.SET_WALLPAPER"); + if (grantStatus === abilityAccessCtrl.GrantStatus.PERMISSION_DENIED) { + console.info(TAG, `PERMISSION_DENIED`); + callback(ERR_DENY, data); // The verification fails and an error is returned. + return; + } + callback(ERR_OK, data + 1); // The verification is successful, and service logic is executed normally. 
+ } + + insertDataToMap(key: string, val: number, callback: insertDataToMapCallback): void { + // Implement service logic. + console.log(TAG, `insertDataToMap, key: ${key} val: ${val}`); + callback(ERR_OK); + } + } ``` diff --git a/en/application-dev/application-models/stage-model-development-overview.md b/en/application-dev/application-models/stage-model-development-overview.md index d7f8123a379fc7950820e531a14f45dfca68f961..451649bdb1a63147b79f8c7e2d4523d6c651c548 100644 --- a/en/application-dev/application-models/stage-model-development-overview.md +++ b/en/application-dev/application-models/stage-model-development-overview.md @@ -10,7 +10,7 @@ The following figure shows the basic concepts used in the stage model. - [UIAbility component](uiability-overview.md) and [ExtensionAbility component](extensionability-overview.md) - The stage model provides two types of application components: UIAbility and ExtensionAbility. Both have specific classes and support object-oriented development. They are the specific implementation of the abstract ability concept on the stage model. They are also units scheduled by the Ability Manager Service (AMS), which schedules their lifecycles as well. + The stage model provides two types of application components: UIAbility and ExtensionAbility. Both have specific classes and support object-oriented development. - UIAbility has the UI and is mainly used for user interaction. For example, with UIAbility, the Gallery application can display images in the liquid layout. After a user selects an image, it uses a new UI to display the image details. The user can touch the **Back** button to return to the liquid layout. The lifecycle of the UIAbility component contains the creation, destruction, foreground, and background states. Display-related states are exposed through WindowStage events. @@ -22,6 +22,7 @@ The following figure shows the basic concepts used in the stage model. 
- [Context](application-context-stage.md) In the stage model, Context and its derived classes provide a variety of resources and capabilities that can be called during the runtime. The UIAbility component and ExtensionAbility derived classes have different Context classes. These classes, which all inherit from the base class Context, provide different capabilities. + - [AbilityStage](abilitystage.md) Each HAP of the Entry or Feature type has an AbilityStage class instance during the runtime. When the code in the HAP is loaded to the process for the first time, the system creates an AbilityStage class instance first. Each UIAbility class defined in the HAP is associated with this class instance after instantiation. Through this class instance, you can obtain the runtime information of the UIAbility instances in the HAP. diff --git a/en/application-dev/application-models/start-page.md b/en/application-dev/application-models/start-page.md index 58966d93cba037eaad141caaed0feaaaa672cde1..5831ea0c6f6b6fa9d954134ef723f70e925e3ed7 100644 --- a/en/application-dev/application-models/start-page.md +++ b/en/application-dev/application-models/start-page.md @@ -70,7 +70,7 @@ struct Index { ``` -When the launch type of a PageAbility is set to **standard** or when the PageAbility with the launch type set to **singleton** is started for the first time, you can use the **parameters** parameter in **want** to transfer the pages information and use the **startAbility()** method to start the PageAbility. For details about the launch type, see [PageAbility Launch Type](pageability-launch-type.md). The target PageAbility can use the **featureAbility.getWant()** method in **onCreate** to obtain the **want** parameter, and then call **router.push** to start a specified page. 
+When a PageAbility in multiton mode is started or when the PageAbility in singleton mode is started for the first time, you can use the **parameters** parameter in **want** to transfer the pages information and use the **startAbility()** method to start the PageAbility. For details about the launch type, see [PageAbility Launch Type](pageability-launch-type.md). The target PageAbility can use the **featureAbility.getWant()** method in **onCreate** to obtain the **want** parameter, and then call **router.push** to start a specified page. When a user touches the button on the page of the caller PageAbility, the **startAbility()** method is called to start the target PageAbility. The **want** parameter in **startAbility()** carries the specified page information. diff --git a/en/application-dev/application-models/thread-model-stage.md b/en/application-dev/application-models/thread-model-stage.md index deaab60b7bd7549dcb96bc00d7896d5c67e5c5d2..4ca9fb3ed369f78cf12054c7b6da085b8640b1db 100644 --- a/en/application-dev/application-models/thread-model-stage.md +++ b/en/application-dev/application-models/thread-model-stage.md @@ -2,19 +2,14 @@ For an OpenHarmony application, each process has a main thread to provide the following functionalities: -- Manage other threads. - -- Enable multiple UIAbility components of the same application to share the same main thread. - -- Distribute input events. - - Draw the UI. - -- Invoke application code callbacks (event processing and lifecycle callbacks). - +- Manage the ArkTS engine instance of the main thread so that multiple UIAbility components can run on it. +- Manage ArkTS engine instances of other threads (such as the worker thread), for example, starting and terminating other threads. +- Distribute interaction events. +- Process application code callbacks (event processing and lifecycle management). - Receive messages sent by the worker thread. -In addition to the main thread, there is an independent thread, named worker. 
The worker thread is mainly used to perform time-consuming operations. It cannot directly operate the UI. The worker thread is created in the main thread and is independent of the main thread. A maximum of seven worker threads can be created. +In addition to the main thread, there is an independent thread, named worker. The worker thread is mainly used to perform time-consuming operations. The worker thread is created in the main thread and is independent from the main thread. It cannot directly operate the UI. A maximum of seven worker threads can be created. ![thread-model-stage](figures/thread-model-stage.png) @@ -22,4 +17,5 @@ Based on the OpenHarmony thread model, different services run on different threa > **NOTE** > -> The stage model provides only the main thread and worker thread. Emitter is mainly used for event synchronization within the main thread or between the main thread and worker thread. +> - The stage model provides only the main thread and worker thread. Emitter is mainly used for event synchronization within the main thread or between the main thread and worker thread. +> - To view thread information about an application process, run the **hdc shell** command to enter the shell CLI of the device, and then run the **ps -p ** -T command**, where ** indicates the ID of the application process. diff --git a/en/application-dev/application-models/uiability-data-sync-with-ui.md b/en/application-dev/application-models/uiability-data-sync-with-ui.md index 9ed8c8d6f3b307ef44097f1ff67e6dcf472f91a5..52967c25c86966710853378c95c74d5e6e13e432 100644 --- a/en/application-dev/application-models/uiability-data-sync-with-ui.md +++ b/en/application-dev/application-models/uiability-data-sync-with-ui.md @@ -80,7 +80,7 @@ Before using the APIs provided by **EventHub**, you must obtain an **EventHub** 4. After **event1** is used, you can call [eventHub.off()](../reference/apis/js-apis-inner-application-eventHub.md#eventhuboff) to unsubscribe from the event. 
```ts - // context is the ability-level context of the UIAbility instance. + // context is the AbilityContext of the UIAbility instance. this.context.eventHub.off('event1'); ``` @@ -124,14 +124,14 @@ By binding attributes or methods to **globalThis**, you can implement data synch ```ts let entryAbilityWant; - + @Entry @Component struct Index { aboutToAppear() { entryAbilityWant = globalThis.entryAbilityWant; } - + // Page display. build() { // ... @@ -161,7 +161,7 @@ To implement data synchronization between two UIAbility components in the same a ```ts import UIAbility from '@ohos.app.ability.UIAbility' - + export default class UIAbilityB extends UIAbility { onCreate(want, launch) { // UIAbilityB reads name from globalThis and outputs it. @@ -194,7 +194,7 @@ To implement data synchronization between the UIAbility and ExtensionAbility com ```ts import Extension from '@ohos.app.ability.ServiceExtensionAbility' - + export default class ServiceExtAbility extends Extension { onCreate(want) { / / ServiceExtAbility reads name from globalThis and outputs it. @@ -240,10 +240,6 @@ The following provides an example to describe the object overwritten problem in struct Index { onPageShow() { let ctx = globalThis.context; // Obtain the context from globalThis and use it. - let permissions = ['com.example.permission'] - ctx.requestPermissionsFromUser(permissions,(result) => { - // ... - }); } // Page display. build() { @@ -274,10 +270,6 @@ The following provides an example to describe the object overwritten problem in struct Index { onPageShow() { let ctx = globalThis.context; // Obtain the context from globalThis and use it. - let permissions = ['com.example.permission'] - ctx.requestPermissionsFromUser(permissions,(result) => { - console.info('requestPermissionsFromUser result:' + JSON.stringify(result)); - }); } // Page display. 
build() { @@ -307,10 +299,6 @@ The following provides an example to describe the object overwritten problem in struct Index { onPageShow() { let ctx = globalThis.context; // The context in globalThis is the context of UIAbilityB. - let permissions=['com.example.permission']; - ctx.requestPermissionsFromUser(permissions,(result) => { // Using this object causes a process breakdown. - console.info('requestPermissionsFromUser result:' + JSON.stringify(result)); - }); } // Page display. build() { @@ -321,4 +309,6 @@ The following provides an example to describe the object overwritten problem in ## Using AppStorage or LocalStorage for Data Synchronization -ArkUI provides AppStorage and LocalStorage to implement application- and UIAbility-level data synchronization, respectively. Both solutions can be used to manage the application state, enhance application performance, and improve user experience. The AppStorage is a global state manager and is applicable when multiple UIAbilities share the same state data. The LocalStorage is a local state manager that manages state data used inside a single UIAbility. They help you control the application state more flexibly and improve the maintainability and scalability of applications. For details, see [State Management of Application-Level Variables](../quick-start/arkts-state-mgmt-application-level.md). +ArkUI provides AppStorage and LocalStorage to implement application- and UIAbility-level data synchronization, respectively. Both solutions can be used to manage the application state, enhance application performance, and improve user experience. The AppStorage is a global state manager and is applicable when multiple UIAbilities share the same state data. The LocalStorage is a local state manager that manages state data used inside a single UIAbility. They help you control the application state more flexibly and improve the maintainability and scalability of applications. 
For details, see [State Management of Application-Level Variables](../quick-start/arkts-application-state-management-overview.md). + + \ No newline at end of file diff --git a/en/application-dev/application-models/uiability-intra-device-interaction.md b/en/application-dev/application-models/uiability-intra-device-interaction.md index 25f509e9032a3670664937307fe4691b6a09bc28..9dbbc2be90107d2131a1cdae21e576cb4771966e 100644 --- a/en/application-dev/application-models/uiability-intra-device-interaction.md +++ b/en/application-dev/application-models/uiability-intra-device-interaction.md @@ -19,7 +19,7 @@ This topic describes the UIAbility interaction modes in the following scenarios. - [Starting a Specified Page of UIAbility](#starting-a-specified-page-of-uiability) -- [Using Ability Call to Implement UIAbility Interaction (for System Applications Only)](#using-ability-call-to-implement-uiability-interaction-for-system-applications-only) +- [Using Call to Implement UIAbility Interaction (for System Applications Only)](#using-call-to-implement-uiability-interaction-for-system-applications-only) ## Starting UIAbility in the Same Application @@ -31,45 +31,52 @@ Assume that your application has two UIAbility components: EntryAbility and Func 1. In EntryAbility, call [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start UIAbility. The [want](../reference/apis/js-apis-app-ability-want.md) parameter is the entry parameter for starting the UIAbility instance. In the **want** parameter, **bundleName** indicates the bundle name of the application to start; **abilityName** indicates the name of the UIAbility to start; **moduleName** is required only when the target UIAbility belongs to a different module; **parameters** is used to carry custom information. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). 
```ts + let context = ...; // UIAbilityContext let wantInfo = { - deviceId: '', // An empty deviceId indicates the local device. - bundleName: 'com.example.myapplication', - abilityName: 'FuncAbility', - moduleName: 'module1', // moduleName is optional. - parameters: {// Custom information. - info: 'From the Index page of EntryAbility', - }, + deviceId: '', // An empty deviceId indicates the local device. + bundleName: 'com.example.myapplication', + abilityName: 'FuncAbility', + moduleName: 'module1', // moduleName is optional. + parameters: {// Custom information. + info: 'From the Index page of EntryAbility', + }, } - // context is the ability-level context of the initiator UIAbility. - this.context.startAbility(wantInfo).then(() => { - // ... + // context is the UIAbilityContext of the initiator UIAbility. + context.startAbility(wantInfo).then(() => { + // ... }).catch((err) => { - // ... + // ... }) ``` - -2. Use the FuncAbility lifecycle callback to receive the parameters passed from EntryAbility. + +2. In FuncAbility, use [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) or [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonnewwant) to receive the parameters passed in by EntryAbility. ```ts import UIAbility from '@ohos.app.ability.UIAbility'; import window from '@ohos.window'; export default class FuncAbility extends UIAbility { - onCreate(want, launchParam) { - // Receive the parameters passed by the caller UIAbility. - let funcAbilityWant = want; - let info = funcAbilityWant?.parameters?.info; - // ... - } + onCreate(want, launchParam) { + // Receive the parameters passed by the initiator UIAbility. + let funcAbilityWant = want; + let info = funcAbilityWant?.parameters?.info; + // ... + } } ``` + > **NOTE**
+ > + > In FuncAbility started, you can obtain the PID and bundle name of the UIAbility through **parameters** in the passed **want** parameter. + 3. To stop the **UIAbility** instance after the FuncAbility service is complete, call [terminateSelf()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateself) in FuncAbility. ```ts - // context is the ability-level context of the UIAbility instance to stop. - this.context.terminateSelf((err) => { - // ... + let context = ...; // UIAbilityContext + + // context is the UIAbilityContext of the UIAbility instance to stop. + context.terminateSelf((err) => { + // ... }); ``` @@ -87,67 +94,70 @@ When starting FuncAbility from EntryAbility, you want the result to be returned 1. In EntryAbility, call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start FuncAbility. Use **data** in the asynchronous callback to receive information returned after FuncAbility stops itself. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). ```ts + let context = ...; // UIAbilityContext let wantInfo = { - deviceId: '', // An empty deviceId indicates the local device. - bundleName: 'com.example.myapplication', - abilityName: 'FuncAbility', - moduleName: 'module1', // moduleName is optional. - parameters: {// Custom information. - info: 'From the Index page of EntryAbility', - }, + deviceId: '', // An empty deviceId indicates the local device. + bundleName: 'com.example.myapplication', + abilityName: 'FuncAbility', + moduleName: 'module1', // moduleName is optional. + parameters: {// Custom information. + info: 'From the Index page of EntryAbility', + }, } - // context is the ability-level context of the initiator UIAbility. - this.context.startAbilityForResult(wantInfo).then((data) => { - // ... 
+ // context is the UIAbilityContext of the initiator UIAbility. + context.startAbilityForResult(wantInfo).then((data) => { + // ... }).catch((err) => { - // ... + // ... }) ``` - + 2. Call [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to stop FuncAbility. Use the input parameter **abilityResult** to carry the information that FuncAbility needs to return to EntryAbility. ```ts + let context = ...; // UIAbilityContext const RESULT_CODE: number = 1001; let abilityResult = { - resultCode: RESULT_CODE, - want: { - bundleName: 'com.example.myapplication', - abilityName: 'FuncAbility', - moduleName: 'module1', - parameters: { - info: 'From the Index page of FuncAbility', - }, + resultCode: RESULT_CODE, + want: { + bundleName: 'com.example.myapplication', + abilityName: 'FuncAbility', + moduleName: 'module1', + parameters: { + info: 'From the Index page of FuncAbility', }, + }, } - // context is the ability-level context of the callee UIAbility. - this.context.terminateSelfWithResult(abilityResult, (err) => { - // ... + // context is the AbilityContext of the target UIAbility. + context.terminateSelfWithResult(abilityResult, (err) => { + // ... }); ``` - + 3. After FuncAbility stops itself, EntryAbility uses [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to receive the information returned by FuncAbility. The value of **RESULT_CODE** must be the same as the preceding value. ```ts + let context = ...; // UIAbilityContext const RESULT_CODE: number = 1001; // ... - // context is the ability-level context of the initiator UIAbility. - this.context.startAbilityForResult(want).then((data) => { - if (data?.resultCode === RESULT_CODE) { - // Parse the information returned by the callee UIAbility. - let info = data.want?.parameters?.info; - // ... 
- } - }).catch((err) => { + // context is the UIAbilityContext of the initiator UIAbility. + context.startAbilityForResult(wantInfo).then((data) => { + if (data?.resultCode === RESULT_CODE) { + // Parse the information returned by the target UIAbility. + let info = data.want?.parameters?.info; // ... + } + }).catch((err) => { + // ... }) ``` ## Starting UIAbility of Another Application -Generally, the user only needs to do a common operation (for example, selecting a document application to view the document content) to start the UIAbility of another application. The [implicit Want launch mode](want-overview.md#types-of-want) is recommended. The system identifies a matched UIAbility and starts it based on the **want** parameter of the caller. +Generally, the user only needs to do a common operation (for example, selecting a document application to view the document content) to start the UIAbility of another application. The [implicit Want launch mode](want-overview.md#types-of-want) is recommended. The system identifies a matched UIAbility and starts it based on the **want** parameter of the initiator UIAbility. There are two ways to start **UIAbility**: [explicit and implicit](want-overview.md). @@ -183,35 +193,38 @@ This section describes how to start the UIAbility of another application through } ``` -2. Include **entities** and **actions** of the caller's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). +2. Include **entities** and **actions** of the initiator UIAbility's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. 
After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). ```ts + let context = ...; // UIAbilityContext let wantInfo = { - deviceId: '', // An empty deviceId indicates the local device. - // Uncomment the line below if you want to implicitly query data only in the specific bundle. - // bundleName: 'com.example.myapplication', - action: 'ohos.want.action.viewData', - // entities can be omitted. - entities: ['entity.system.default'], + deviceId: '', // An empty deviceId indicates the local device. + // Uncomment the line below if you want to implicitly query data only in the specific bundle. + // bundleName: 'com.example.myapplication', + action: 'ohos.want.action.viewData', + // entities can be omitted. + entities: ['entity.system.default'], } - // context is the ability-level context of the initiator UIAbility. - this.context.startAbility(wantInfo).then(() => { - // ... + // context is the UIAbilityContext of the initiator UIAbility. + context.startAbility(wantInfo).then(() => { + // ... }).catch((err) => { - // ... + // ... }) ``` - - The following figure shows the effect. When you click **Open PDF**, a dialog box is displayed for you to select. + + The following figure shows the effect. When you click **Open PDF**, a dialog box is displayed for you to select. ![](figures/uiability-intra-device-interaction.png) 3. To stop the **UIAbility** instance after the document application is used, call [terminateSelf()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateself). ```ts - // context is the ability-level context of the UIAbility instance to stop. - this.context.terminateSelf((err) => { - // ... 
+ let context = ...; // UIAbilityContext + + // context is the UIAbilityContext of the UIAbility instance to stop. + context.terminateSelf((err) => { + // ... }); ``` @@ -246,65 +259,68 @@ If you want to obtain the return result when using implicit Want to start the UI } ``` -2. Call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start the UIAbility of the payment application. Include **entities** and **actions** of the caller's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. Use **data** in the asynchronous callback to receive the information returned to the caller after the payment UIAbility stops itself. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. +2. Call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start the UIAbility of the payment application. Include **entities** and **actions** of the initiator UIAbility's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. Use **data** in the asynchronous callback to receive the information returned to the initiator UIAbility after the payment UIAbility stops itself. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. ```ts + let context = ...; // UIAbilityContext let wantInfo = { - deviceId: '', // An empty deviceId indicates the local device. - // Uncomment the line below if you want to implicitly query data only in the specific bundle. - // bundleName: 'com.example.myapplication', - action: 'ohos.want.action.editData', - // entities can be omitted. 
- entities: ['entity.system.default'], + deviceId: '', // An empty deviceId indicates the local device. + // Uncomment the line below if you want to implicitly query data only in the specific bundle. + // bundleName: 'com.example.myapplication', + action: 'ohos.want.action.editData', + // entities can be omitted. + entities: ['entity.system.default'], } - // context is the ability-level context of the initiator UIAbility. - this.context.startAbilityForResult(wantInfo).then((data) => { - // ... + // context is the UIAbilityContext of the initiator UIAbility. + context.startAbilityForResult(wantInfo).then((data) => { + // ... }).catch((err) => { - // ... + // ... }) ``` - + 3. After the payment is finished, call [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to stop the payment UIAbility and return the **abilityResult** parameter. ```ts + let context = ...; // UIAbilityContext const RESULT_CODE: number = 1001; let abilityResult = { - resultCode: RESULT_CODE, - want: { - bundleName: 'com.example.myapplication', - abilityName: 'EntryAbility', - moduleName: 'entry', - parameters: { - payResult: 'OKay', - }, + resultCode: RESULT_CODE, + want: { + bundleName: 'com.example.myapplication', + abilityName: 'EntryAbility', + moduleName: 'entry', + parameters: { + payResult: 'OKay', }, + }, } - // context is the ability-level context of the callee UIAbility. - this.context.terminateSelfWithResult(abilityResult, (err) => { - // ... + // context is the AbilityContext of the target UIAbility. + context.terminateSelfWithResult(abilityResult, (err) => { + // ... }); ``` - + 4. Receive the information returned by the payment application in the callback of the [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) method. 
The value of **RESULT_CODE** must be the same as that returned by [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult). ```ts + let context = ...; // UIAbilityContext const RESULT_CODE: number = 1001; let want = { // Want parameter information. }; - // context is the ability-level context of the initiator UIAbility. - this.context.startAbilityForResult(want).then((data) => { - if (data?.resultCode === RESULT_CODE) { - // Parse the information returned by the callee UIAbility. - let payResult = data.want?.parameters?.payResult; - // ... - } - }).catch((err) => { + // context is the UIAbilityContext of the initiator UIAbility. + context.startAbilityForResult(want).then((data) => { + if (data?.resultCode === RESULT_CODE) { + // Parse the information returned by the target UIAbility. + let payResult = data.want?.parameters?.payResult; // ... + } + }).catch((err) => { + // ... }) ``` @@ -323,7 +339,7 @@ The window mode is specified by the **windowMode** field in the [StartOptions](. > **NOTE** > > 1. If the **windowMode** field is not specified, the UIAbility is started in the default window mode. -> 2. To ensure that the application can be displayed in the required window mode, check the **supportWindowMode** field in the [abilities tag](../quick-start/module-configuration-file.md#abilities) in the [module.json5 file](../quick-start/module-configuration-file.md) of the UIAbility and make sure the specified window mode is supported. +> 2. To ensure that the application can be displayed in the required window mode, check the **supportWindowMode** field in the [abilities](../quick-start/module-configuration-file.md#abilities) tag in the [module.json5 file](../quick-start/module-configuration-file.md) of the UIAbility and make sure the specified window mode is supported. 
The following uses the floating window mode as an example to describe how to start the FuncAbility from the EntryAbility page. @@ -335,6 +351,7 @@ For details about how to obtain the context, see [Obtaining the Context of UIAbi ```ts import AbilityConstant from '@ohos.app.ability.AbilityConstant'; +let context = ...; // UIAbilityContext let wantInfo = { deviceId: '', // An empty deviceId indicates the local device. bundleName: 'com.example.myapplication', @@ -347,8 +364,8 @@ let wantInfo = { let options = { windowMode: AbilityConstant.WindowMode.WINDOW_MODE_FLOATING } -// context is the ability-level context of the initiator UIAbility. -this.context.startAbility(wantInfo, options).then(() => { +// context is the UIAbilityContext of the initiator UIAbility. +context.startAbility(wantInfo, options).then(() => { // ... }).catch((err) => { // ... @@ -365,10 +382,11 @@ A UIAbility component can have multiple pages. When it is started in different s ### Specifying a Startup Page -When the caller UIAbility starts another UIAbility, it usually needs to redirect to a specified page. For example, FuncAbility contains two pages: Index (corresponding to the home page) and Second (corresponding to function A page). You can configure the specified page URL in the **want** parameter by adding a custom parameter to **parameters** in **want**. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). +When the initiator UIAbility starts another UIAbility, it usually needs to redirect to a specified page. For example, FuncAbility contains two pages: Index (corresponding to the home page) and Second (corresponding to function A page). You can configure the specified page URL in the **want** parameter by adding a custom parameter to **parameters** in **want**. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability). 
```ts +let context = ...; // UIAbilityContext let wantInfo = { deviceId: '', // An empty deviceId indicates the local device. bundleName: 'com.example.myapplication', @@ -378,8 +396,8 @@ let wantInfo = { router: 'funcA', }, } -// context is the ability-level context of the initiator UIAbility. -this.context.startAbility(wantInfo).then(() => { +// context is the UIAbilityContext of the initiator UIAbility. +context.startAbility(wantInfo).then(() => { // ... }).catch((err) => { // ... @@ -397,25 +415,25 @@ import UIAbility from '@ohos.app.ability.UIAbility' import Window from '@ohos.window' export default class FuncAbility extends UIAbility { - funcAbilityWant; - - onCreate(want, launchParam) { - // Receive the parameters passed by the caller UIAbility. - this.funcAbilityWant = want; - } - - onWindowStageCreate(windowStage: Window.WindowStage) { - // Main window is created. Set a main page for this ability. - let url = 'pages/Index'; - if (this.funcAbilityWant?.parameters?.router) { - if (this.funcAbilityWant.parameters.router === 'funA') { - url = 'pages/Second'; - } - } - windowStage.loadContent(url, (err, data) => { - // ... - }); + funcAbilityWant; + + onCreate(want, launchParam) { + // Receive the parameters passed by the initiator UIAbility. + this.funcAbilityWant = want; + } + + onWindowStageCreate(windowStage: Window.WindowStage) { + // Main window is created. Set a main page for this UIAbility. + let url = 'pages/Index'; + if (this.funcAbilityWant?.parameters?.router) { + if (this.funcAbilityWant.parameters.router === 'funA') { + url = 'pages/Second'; + } } + windowStage.loadContent(url, (err, data) => { + // ... + }); + } } ``` @@ -434,11 +452,11 @@ In summary, when a UIAbility instance of application A has been created and the import UIAbility from '@ohos.app.ability.UIAbility' export default class FuncAbility extends UIAbility { - onNewWant(want, launchParam) { - // Receive the parameters passed by the caller UIAbility. 
- globalThis.funcAbilityWant = want; - // ... - } + onNewWant(want, launchParam) { + // Receive the parameters passed by the initiator UIAbility. + globalThis.funcAbilityWant = want; + // ... + } } ``` @@ -469,215 +487,200 @@ In summary, when a UIAbility instance of application A has been created and the > **NOTE** > -> When the [launch type of the callee UIAbility](uiability-launch-type.md) is set to **standard**, a new instance is created each time the callee UIAbility is started. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback will not be invoked. +> When the [launch type of the target UIAbility](uiability-launch-type.md) is set to **multiton**, a new instance is created each time the target UIAbility is started. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback will not be invoked. -## Using Ability Call to Implement UIAbility Interaction (for System Applications Only) +## Using Call to Implement UIAbility Interaction (for System Applications Only) -Ability call is an extension of the UIAbility capability. It enables the UIAbility to be invoked by and communicate with external systems. The UIAbility invoked can be either started in the foreground or created and run in the background. You can use the ability call to implement data sharing between two UIAbility instances (caller ability and callee ability) through IPC. +Call is an extension of the UIAbility capability. It enables the UIAbility to be invoked by and communicate with external systems. The UIAbility invoked can be either started in the foreground or created and run in the background. You can use the call to implement data sharing between two UIAbility instances (CallerAbility and CalleeAbility) through IPC. 
-The core API used for the ability call is **startAbilityByCall**, which differs from **startAbility** in the following ways: +The core API used for the call is **startAbilityByCall**, which differs from **startAbility** in the following ways: -- **startAbilityByCall** supports ability launch in the foreground and background, whereas **startAbility** supports ability launch in the foreground only. +- **startAbilityByCall** supports UIAbility launch in the foreground and background, whereas **startAbility** supports UIAbility launch in the foreground only. -- The caller ability can use the caller object returned by **startAbilityByCall** to communicate with the callee ability, but **startAbility** does not provide the communication capability. +- The CallerAbility can use the caller object returned by **startAbilityByCall** to communicate with the CalleeAbility, but **startAbility** does not provide the communication capability. -Ability call is usually used in the following scenarios: +Call is usually used in the following scenarios: -- Communicating with the callee ability +- Communicating with the CalleeAbility -- Starting the callee ability in the background +- Starting the CalleeAbility in the background -**Table 1** Terms used in the ability call +**Table 1** Terms used in the call | **Term**| Description| | -------- | -------- | -| CallerAbility | UIAbility that triggers the ability call.| -| CalleeAbility | UIAbility invoked by the ability call.| -| Caller | Object returned by **startAbilityByCall** and used by the caller ability to communicate with the callee ability.| -| Callee | Object held by the callee ability to communicate with the caller ability.| +| CallerAbility| UIAbility that triggers the call.| +| CalleeAbility | UIAbility invoked by the call.| +| Caller | Object returned by **startAbilityByCall** and used by the CallerAbility to communicate with the CalleeAbility.| +| Callee | Object held by the CalleeAbility to communicate with the 
CallerAbility.| -The following figure shows the ability call process. +The following figure shows the call process. - Figure 1 Ability call process + Figure 1 Call process ![call](figures/call.png) -- The caller ability uses **startAbilityByCall** to obtain a caller object and uses **call()** of the caller object to send data to the callee ability. +- The CallerAbility uses **startAbilityByCall** to obtain a caller object and uses **call()** of the caller object to send data to the CalleeAbility. -- The callee ability, which holds a **Callee** object, uses **on()** of the **Callee** object to register a callback. This callback is invoked when the callee ability receives data from the caller ability. +- The CalleeAbility, which holds a **Callee** object, uses **on()** of the **Callee** object to register a callback. This callback is invoked when the CalleeAbility receives data from the CallerAbility. > **NOTE** -> 1. Currently, only system applications can use the ability call. +> 1. Currently, only system applications can use the call. > -> 2. The launch type of the callee ability must be **singleton**. +> 2. The launch type of the CalleeAbility must be **singleton**. > -> 3. Both local (intra-device) and cross-device ability calls are supported. The following describes how to initiate a local call. For details about how to initiate a cross-device ability call, see [Using Cross-Device Ability Call](hop-multi-device-collaboration.md#using-cross-device-ability-call). +> 3. Both local (intra-device) and cross-device calls are supported. The following describes how to initiate a local call. For details about how to initiate a cross-device call, see [Using Cross-Device Call](hop-multi-device-collaboration.md#using-cross-device-call). ### Available APIs -The following table describes the main APIs used for the ability call. For details, see [AbilityContext](../reference/apis/js-apis-app-ability-uiAbility.md#caller). 
+The following table describes the main APIs used for the call. For details, see [AbilityContext](../reference/apis/js-apis-app-ability-uiAbility.md#caller). -**Table 2** Ability call APIs + **Table 2** Call APIs | API| Description| | -------- | -------- | | startAbilityByCall(want: Want): Promise<Caller> | Starts a UIAbility in the foreground (through the **want** configuration) or background (default) and obtains the caller object for communication with the UIAbility. For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartabilitybycall) or [ServiceExtensionContext](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextstartabilitybycall).| -| on(method: string, callback: CalleeCallBack): void | Callback invoked when the callee ability registers a method.| -| off(method: string): void | Callback invoked when the callee ability deregisters a method.| -| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the callee ability.| -| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence> | Sends agreed parcelable data to the callee ability and obtains the agreed parcelable data returned by the callee ability.| +| on(method: string, callback: CalleeCallBack): void | Callback invoked when the CalleeAbility registers a method.| +| off(method: string): void | Callback invoked when the CalleeAbility deregisters a method.| +| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the CalleeAbility.| +| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence> | Sends agreed parcelable data to the CalleeAbility and obtains the agreed parcelable data returned by the CalleeAbility.| | release(): void | Releases the caller object.| | on(type: "release", callback: OnReleaseCallback): void | Callback invoked when the caller object is released.| -The 
implementation of using the ability call for UIAbility interaction involves two parts. +The implementation of using the call for UIAbility interaction involves two parts. -- [Creating a Callee Ability](#creating-a-callee-ability) +- [Creating a CalleeAbility](#creating-a-calleeability) -- [Accessing the Callee Ability](#accessing-the-callee-ability) +- [Accessing the CalleeAbility](#accessing-the-calleeability) -### Creating a Callee Ability +### Creating a CalleeAbility -For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener. +For the CalleeAbility, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener. -1. Configure the ability launch type. - - Set **launchType** of the callee ability to **singleton** in the **module.json5** file. +1. Configure the launch type of the UIAbility. - | JSON Field| Description| - | -------- | -------- | - | "launchType" | Ability launch type. Set this parameter to **singleton**.| - - An example of the ability configuration is as follows: - - - ```json - "abilities":[{ - "name": ".CalleeAbility", - "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts", - "launchType": "singleton", - "description": "$string:CalleeAbility_desc", - "icon": "$media:icon", - "label": "$string:CalleeAbility_label", - "visible": true - }] - ``` + For example, set the launch type of the CalleeAbility to **singleton**. For details, see [UIAbility Component Launch Type](uiability-launch-type.md). 2. Import the **UIAbility** module. - + ```ts - import Ability from '@ohos.app.ability.UIAbility'; + import UIAbility from '@ohos.app.ability.UIAbility'; ``` 3. 
Define the agreed parcelable data. - The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. - + The data formats sent and received by the CallerAbility and CalleeAbility must be consistent. In the following example, the data formats are number and string. + ```ts export default class MyParcelable { - num: number = 0 - str: string = "" + num: number = 0; + str: string = ''; - constructor(num, string) { - this.num = num - this.str = string - } + constructor(num, string) { + this.num = num; + this.str = string; + } - marshalling(messageSequence) { - messageSequence.writeInt(this.num) - messageSequence.writeString(this.str) - return true - } + marshalling(messageSequence) { + messageSequence.writeInt(this.num); + messageSequence.writeString(this.str); + return true + } - unmarshalling(messageSequence) { - this.num = messageSequence.readInt() - this.str = messageSequence.readString() - return true - } + unmarshalling(messageSequence) { + this.num = messageSequence.readInt(); + this.str = messageSequence.readString(); + return true; + } } ``` 4. Implement **Callee.on** and **Callee.off**. - The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The sample code is as follows: + The time to register a listener for the CalleeAbility depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. 
In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the UIAbility and deregistered in **onDestroy**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The sample code is as follows: + + ```ts const TAG: string = '[CalleeAbility]'; const MSG_SEND_METHOD: string = 'CallSendMsg'; function sendMsgCallback(data) { - console.info('CalleeSortFunc called'); + console.info('CalleeSortFunc called'); - // Obtain the parcelable data sent by the caller ability. - let receivedData = new MyParcelable(0, ''); - data.readParcelable(receivedData); - console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`); + // Obtain the parcelable data sent by the CallerAbility. + let receivedData = new MyParcelable(0, ''); + data.readParcelable(receivedData); + console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`); - // Process the data. - // Return the parcelable data result to the caller ability. - return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`); + // Process the data. + // Return the parcelable data result to the CallerAbility. 
+ return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`); } - export default class CalleeAbility extends Ability { - onCreate(want, launchParam) { - try { - this.callee.on(MSG_SEND_METHOD, sendMsgCallback); - } catch (error) { - console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`); - } + export default class CalleeAbility extends UIAbility { + onCreate(want, launchParam) { + try { + this.callee.on(MSG_SEND_METHOD, sendMsgCallback); + } catch (error) { + console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`); } + } - onDestroy() { - try { - this.callee.off(MSG_SEND_METHOD); - } catch (error) { - console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`); - } + onDestroy() { + try { + this.callee.off(MSG_SEND_METHOD); + } catch (error) { + console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`); } + } } ``` -### Accessing the Callee Ability + +### Accessing the CalleeAbility 1. Import the **UIAbility** module. ```ts - import Ability from '@ohos.app.ability.UIAbility'; + import UIAbility from '@ohos.app.ability.UIAbility'; ``` 2. Obtain the caller interface. - The **context** attribute of the ability implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the caller object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements. + The **UIAbilityContext** attribute implements **startAbilityByCall** to obtain the caller object for communication. 
The following example uses **this.context** to obtain the **UIAbilityContext**, uses **startAbilityByCall** to start the CalleeAbility, obtain the caller object, and register the **onRelease** listener of the CallerAbility. You need to implement processing based on service requirements. + ```ts - // Register the onRelease() listener of the caller ability. + // Register the onRelease() listener of the CallerAbility. private regOnRelease(caller) { - try { - caller.on("release", (msg) => { - console.info(`caller onRelease is called ${msg}`); - }) - console.info('caller register OnRelease succeed'); - } catch (error) { - console.info(`caller register OnRelease failed with ${error}`); - } + try { + caller.on('release', (msg) => { + console.info(`caller onRelease is called ${msg}`); + }) + console.info('caller register OnRelease succeed'); + } catch (error) { + console.info(`caller register OnRelease failed with ${error}`); + } } async onButtonGetCaller() { - try { - this.caller = await context.startAbilityByCall({ - bundleName: 'com.samples.CallApplication', - abilityName: 'CalleeAbility' - }) - if (this.caller === undefined) { - console.info('get caller failed') - return - } - console.info('get caller success') - this.regOnRelease(this.caller) - } catch (error) { - console.info(`get caller failed with ${error}`) + try { + this.caller = await context.startAbilityByCall({ + bundleName: 'com.samples.CallApplication', + abilityName: 'CalleeAbility' + }) + if (this.caller === undefined) { + console.info('get caller failed') + return } + console.info('get caller success') + this.regOnRelease(this.caller) + } catch (error) { + console.info(`get caller failed with ${error}`) + } } ``` diff --git a/en/application-dev/application-models/uiability-launch-type.md b/en/application-dev/application-models/uiability-launch-type.md index 5f2f21b1aff90a27e7307d82045941ec76b98475..8f5762fcde0a766f454a2540708368048e1b01aa 100644 --- 
a/en/application-dev/application-models/uiability-launch-type.md +++ b/en/application-dev/application-models/uiability-launch-type.md @@ -6,7 +6,7 @@ The launch type of the UIAbility component refers to the state of the UIAbility - [Singleton](#singleton) -- [Standard](#standard) +- [Multiton](#multiton) - [Specified](#specified) @@ -17,10 +17,12 @@ The launch type of the UIAbility component refers to the state of the UIAbility Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, if a UIAbility instance of this type already exists in the application process, the instance is reused. Therefore, only one UIAbility instance of this type exists in the system, that is, displayed in **Recents**. -**Figure 1** Demonstration effect in singleton mode -![uiability-launch-type1](figures/uiability-launch-type1.png) +**Figure 1** Demonstration effect in singleton mode +![uiability-launch-type1](figures/uiability-launch-type1.gif) -> **NOTE**
Assume that the application already has a UIAbility instance created, and the launch type of the UIAbility instance is set to **singleton**. If [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called again to start the UIAbility instance, the original UIAbility instance is started, and no new UIAbility instance is created. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback is invoked, but the [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) and [onWindowStageCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonwindowstagecreate) callbacks are not. +> **NOTE** +> +> Assume that the application already has a UIAbility instance created, and the launch type of the UIAbility instance is set to **singleton**. If [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called again to start the UIAbility instance, the original UIAbility instance is started, and no new UIAbility instance is created. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback is invoked, but the [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) and [onWindowStageCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonwindowstagecreate) callbacks are not. To use the singleton mode, set **launchType** in the [module.json5 configuration file](../quick-start/module-configuration-file.md) to **singleton**. @@ -40,14 +42,15 @@ To use the singleton mode, set **launchType** in the [module.json5 configuration ``` -## Standard +## Multiton -In standard mode, each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, a new UIAbility instance of this type is created in the application process. 
Multiple UIAbility instances of this type are displayed in **Recents**. +In multiton mode, each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, a new UIAbility instance of this type is created in the application process. Multiple UIAbility instances of this type are displayed in **Recents**. -**Figure 2** Demonstration effect in standard mode -![standard-mode](figures/standard-mode.png) +**Figure 2** Demonstration effect in multiton mode -To use the standard mode, set **launchType** in the [module.json5 configuration file](../quick-start/module-configuration-file.md) to **standard**. +![uiability-launch-type2](figures/uiability-launch-type2.gif) + +To use the multiton mode, set **launchType** in the [module.json5 file](../quick-start/module-configuration-file.md) to **multiton**. ```json @@ -56,7 +59,7 @@ To use the standard mode, set **launchType** in the [module.json5 configuration // ... "abilities": [ { - "launchType": "standard", + "launchType": "multiton", // ... } ] @@ -69,8 +72,8 @@ To use the standard mode, set **launchType** in the [module.json5 configuration The **specified** mode is used in some special scenarios. For example, in a document application, you want a document instance to be created each time you create a document, but you want to use the same document instance when you repeatedly open an existing document. -**Figure 3** Demonstration effect in specified mode -![uiability-launch-type2](figures/uiability-launch-type2.png) +**Figure 3** Demonstration effect in specified mode +![uiability-launch-type3](figures/uiability-launch-type3.gif) For example, there are two UIAbility components: EntryAbility and SpecifiedAbility (with the launch type **specified**). You are required to start SpecifiedAbility from EntryAbility. 
@@ -108,7 +111,7 @@ For example, there are two UIAbility components: EntryAbility and SpecifiedAbili instanceKey: getInstance(), }, } - // context is the ability-level context of the initiator UIAbility. + // context is the UIAbilityContext of the initiator UIAbility. this.context.startAbility(want).then(() => { // ... }).catch((err) => { @@ -137,7 +140,7 @@ For example, there are two UIAbility components: EntryAbility and SpecifiedAbili } ``` - > **NOTE**
+ > **NOTE** > > 1. Assume that the application already has a UIAbility instance created, and the launch type of the UIAbility instance is set to **specified**. If [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called again to start the UIAbility instance, and the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback of [AbilityStage](../reference/apis/js-apis-app-ability-abilityStage.md) matches a created UIAbility instance, the original UIAbility instance is started, and no new UIAbility instance is created. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback is invoked, but the [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) and [onWindowStageCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonwindowstagecreate) callbacks are not. > 2. AbilityStage is not automatically generated in the default project of DevEco Studio. For details about how to create an AbilityStage file, see [AbilityStage Component Container](abilitystage.md). @@ -150,3 +153,5 @@ For example, there are two UIAbility components: EntryAbility and SpecifiedAbili 2. Close the process of file A in **Recents**. UIAbility instance 1 is destroyed. Return to the home screen and open file A again. A new UIAbility instance is started, for example, UIAbility instance 2. 3. Return to the home screen and open file B. A new UIAbility instance is started, for example, UIAbility instance 3. 4. Return to the home screen and open file A again. UIAbility instance 2 is started. This is because the system automatically matches the key of the UIAbility instance and starts the UIAbility instance that has a matching key. In this example, UIAbility instance 2 has the same key as file A. Therefore, the system pulls back UIAbility instance 2 and focuses it without creating a new instance. 
+ + \ No newline at end of file diff --git a/en/application-dev/application-models/uiability-overview.md b/en/application-dev/application-models/uiability-overview.md index 7e31ab130df2ba9eaf959d1bfb3ddccfb7172480..e9a904e060e30e2523902aa4014664f6f0100e3a 100644 --- a/en/application-dev/application-models/uiability-overview.md +++ b/en/application-dev/application-models/uiability-overview.md @@ -5,10 +5,25 @@ UIAbility is a type of application component that provides the UI for user interaction. -UIAbility is the basic unit scheduled by the system and provides a window for applications to draw UIs. An application can contain one or more UIAbility components. For example, for a payment application, you can use two UIAbility components to carry the entry and payment functionalities. You are advised to use one UIAbility component to carry the same functional module, with multiple pages (if necessary). +The following design philosophy is behind UIAbility: + +1. Native support for [cross-device migration](hop-cross-device-migration.md) and [multi-device collaboration](hop-multi-device-collaboration.md) at the application component level + +2. Support for multiple device types and window forms + +For details, see [Interpretation of the Application Model](application-model-description.md). + +The UIAbility division principles and suggestions are as follows: + +UIAbility is the basic unit scheduled by the system and provides a window for applications to draw UIs. An application can contain one or more UIAbility components. For example, for a payment application, you can use two UIAbility components to carry the entry and payment functionalities. Each UIAbility component instance is displayed as a mission in Recents. +You can develop a single UIAbility or multiple UIAbilities for your application based on service requirements. + +- If you want only one mission to be displayed in Recents, use one UIAbility and multiple pages. 
+ +- If you want multiple missions to be displayed in Recents or multiple windows to be opened simultaneously, use multiple UIAbilities. ## Privacy Statement Configuration @@ -22,7 +37,7 @@ To enable an application to properly use a UIAbility component, declare the UIAb "abilities": [ { "name": "EntryAbility", // Name of the UIAbility component. - "srcEntrance": "./ets/entryability/EntryAbility.ts", // Code path of the UIAbility component. + "srcEntry": "./ets/entryability/EntryAbility.ts", // Code path of the UIAbility component. "description": "$string:EntryAbility_desc", // Description of the UIAbility component. "icon": "$media:icon", // Icon of the UIAbility component. "label": "$string:EntryAbility_label", // Label of the UIAbility component. diff --git a/en/application-dev/application-models/widget-development-fa.md b/en/application-dev/application-models/widget-development-fa.md index 17f9ee7234865b5d01e2a5f68e52cf7928739db7..b766c3420f56a7406bc345911a09bbf91bb6a187 100644 --- a/en/application-dev/application-models/widget-development-fa.md +++ b/en/application-dev/application-models/widget-development-fa.md @@ -323,7 +323,7 @@ async function deleteFormInfo(formId: string) { // ... ``` -For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md). +For details about how to implement persistent data storage, see [Application Data Persistence Overview](../database/app-data-persistence-overview.md). The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary. @@ -364,7 +364,7 @@ You can use the web-like paradigm (HML+CSS+JSON) to develop JS widget pages. Thi > **NOTE** > -> Only the JavaScript-based web-like development paradigm is supported when developing the widget UI. 
+> In the FA model, only the JavaScript-based web-like development paradigm is supported when developing the widget UI. - HML: uses web-like paradigm components to describe the widget page information. diff --git a/en/application-dev/application-models/widget-development-stage.md b/en/application-dev/application-models/widget-development-stage.md deleted file mode 100644 index 73635fbc05c5e11cc0cc72857ccbcc7648bfa451..0000000000000000000000000000000000000000 --- a/en/application-dev/application-models/widget-development-stage.md +++ /dev/null @@ -1,609 +0,0 @@ -# FormExtensionAbility (Widget) - - -## Widget Overview - -FormExtensionAbility provides a service widget (also called widget), which is a set of UI components that display important information or operations specific to an application. It provides users with direct access to a desired application service, without the need to open the application first. - -A widget usually appears as a part of the UI of another application (which currently can only be a system application) and provides basic interactive features such as opening a UI page or sending a message. - -Before you get started, it would be helpful if you have a basic understanding of the following concepts: - -- Widget host: an application that displays the widget content and controls the widget location. - -- Widget Manager: a resident agent that provides widget management features such as periodic widget updates. - -- Widget provider: an atomic service that provides the widget content to display and controls how widget components are laid out and how they interact with users. - - -## Working Principles - -Figure 1 shows the working principles of the widget framework. - -**Figure 1** Widget framework working principles in the stage model -![form-extension](figures/form-extension.png) - -The widget host consists of the following modules: - -- Widget usage: provides operations such as creating, deleting, or updating a widget. 
- -- Communication adapter: provided by the OpenHarmony SDK for communication with the Widget Manager. It sends widget-related operations to the Widget Manager. - -The Widget Manager consists of the following modules: - -- Periodic updater: starts a scheduled task based on the update policy to periodically update a widget after it is added to the Widget Manager. - -- Cache manager: caches view information of a widget after it is added to the Widget Manager to directly return the cached data when the widget is obtained next time. This reduces the latency greatly. - -- Lifecycle manager: suspends update when a widget is switched to the background or is blocked, and updates and/or clears widget data during upgrade and deletion. - -- Object manager: manages RPC objects of the widget host. It is used to verify requests from the widget host and process callbacks after the widget update. - -- Communication adapter: communicates with the widget host and provider through RPCs. - -The widget provider consists of the following modules: - -- Widget service: implemented by the widget provider developer to process requests on widget creation, update, and deletion, and to provide corresponding widget services. - -- Instance manager: implemented by the widget provider developer for persistent management of widget instances allocated by the Widget Manager. - -- Communication adapter: provided by the OpenHarmony SDK for communication with the Widget Manager. It pushes update data to the Widget Manager. - -> **NOTE** -> -> You only need to develop the widget provider. The system automatically handles the work of the widget host and Widget Manager. - - -## Available APIs - -The **FormExtensionAbility** class has the following APIs. For details, see [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md). 
- -| API| Description| -| -------- | -------- | -| onAddForm(want: Want): formBindingData.FormBindingData | Called to notify the widget provider that a widget has been created.| -| onCastToNormalForm(formId: string): void | Called to notify the widget provider that a temporary widget has been converted to a normal one.| -| onUpdateForm(formId: string): void | Called to notify the widget provider that a widget has been updated.| -| onChangeFormVisibility(newStatus: { [key: string]: number }): void | Called to notify the widget provider of the change in widget visibility.| -| onFormEvent(formId: string, message: string): void | Called to instruct the widget provider to receive and process a widget event.| -| onRemoveForm(formId: string): void| Called to notify the widget provider that a widget has been destroyed.| -| onConfigurationUpdate(config: Configuration): void | Called when the configuration of the environment where the widget is running is updated.| -| onShareForm?(formId: string): { [key: string]: any }| Called by the widget provider to receive shared widget data.| - -The **FormExtensionAbility** class also has a member context, that is, the FormExtensionContext class. For details, see [FormExtensionContext](../reference/apis/js-apis-inner-application-formExtensionContext.md). - -| API| Description| -| -------- | -------- | -| startAbility(want: Want, callback: AsyncCallback<void>): void | Starts UIAbility of the application to which a widget belongs. This API uses an asynchronous callback to return the result. (This is a system API and cannot be called by third-party applications. You must apply for the permission to use the API.)| -| startAbility(want: Want): Promise<void> | Starts UIAbility of the application to which a widget belongs. This API uses a promise to return the result. (This is a system API and cannot be called by third-party applications. You must apply for the permission to use the API.)| - -The **FormProvider** class has the following APIs. 
For details, see [FormProvider](../reference/apis/js-apis-app-form-formProvider.md). - -| API| Description| -| -------- | -------- | -| setFormNextRefreshTime(formId: string, minute: number, callback: AsyncCallback<void>): void; | Sets the next refresh time for a widget. This API uses an asynchronous callback to return the result.| -| setFormNextRefreshTime(formId: string, minute: number): Promise<void>; | Sets the next refresh time for a widget. This API uses a promise to return the result.| -| updateForm(formId: string, formBindingData: FormBindingData, callback: AsyncCallback<void>): void; | Updates a widget. This API uses an asynchronous callback to return the result.| -| updateForm(formId: string, formBindingData: FormBindingData): Promise<void>;| Updates a widget. This API uses a promise to return the result.| - -The **FormBindingData** class has the following APIs. For details, see [FormBindingData](../reference/apis/js-apis-app-form-formBindingData.md). - -| API| Description| -| -------- | -------- | -| createFormBindingData(obj?: Object \ string): FormBindingData| | Creates a **FormBindingData** object.| - - -## How to Develop - -The widget provider development based on the [stage model](stage-model-development-overview.md) involves the following key steps: - -- [Creating a FormExtensionAbility Instance](#creating-a-formextensionability-instance): Develop the lifecycle callback functions of FormExtensionAbility. - -- [Configuring the Widget Configuration File](#configuring-the-widget-configuration-file): Configure the application configuration file **module.json5** and profile configuration file. - -- [Persistently Storing Widget Data](#persistently-storing-widget-data): This operation is a form of widget data exchange. - -- [Updating Widget Data](#updating-widget-data): Call **updateForm()** to update the information displayed on a widget. 
- -- [Developing the Widget UI Page](#developing-the-widget-ui-page): Use HML+CSS+JSON to develop a JS widget UI page. - -- [Developing Widget Events](#developing-widget-events): Add the router and message events for a widget. - - -### Creating a FormExtensionAbility Instance - -To create a widget in the stage model, implement the lifecycle callbacks of **FormExtensionAbility**. Generate a widget template by referring to [Developing a Service Widget](https://developer.harmonyos.com/en/docs/documentation/doc-guides/ohos-development-service-widget-0000001263280425). - -1. Import related modules to **EntryFormAbility.ts**. - - ```ts - import FormExtension from '@ohos.app.form.FormExtensionAbility'; - import formBindingData from '@ohos.app.form.formBindingData'; - import formInfo from '@ohos.app.form.formInfo'; - import formProvider from '@ohos.app.form.formProvider'; - import dataStorage from '@ohos.data.storage'; - ``` - -2. Implement the FormExtension lifecycle callbacks in **EntryFormAbility.ts**. - - ```ts - export default class EntryFormAbility extends FormExtension { - onAddForm(want) { - console.info('[EntryFormAbility] onAddForm'); - // Called when the widget is created. The widget provider should return the widget data binding class. - let obj = { - "title": "titleOnCreate", - "detail": "detailOnCreate" - }; - let formData = formBindingData.createFormBindingData(obj); - return formData; - } - onCastToNormalForm(formId) { - // Called when the widget host converts the temporary widget into a normal one. The widget provider should do something to respond to the conversion. - console.info('[EntryFormAbility] onCastToNormalForm'); - } - onUpdateForm(formId) { - // Override this method to support scheduled updates, periodic updates, or updates requested by the widget host. 
- console.info('[EntryFormAbility] onUpdateForm'); - let obj = { - "title": "titleOnUpdate", - "detail": "detailOnUpdate" - }; - let formData = formBindingData.createFormBindingData(obj); - formProvider.updateForm(formId, formData).catch((error) => { - console.info('[EntryFormAbility] updateForm, error:' + JSON.stringify(error)); - }); - } - onChangeFormVisibility(newStatus) { - // Called when the widget host initiates an event about visibility changes. The widget provider should do something to respond to the notification. This callback takes effect only for system applications. - console.info('[EntryFormAbility] onChangeFormVisibility'); - } - onFormEvent(formId, message) { - // If the widget supports event triggering, override this method and implement the trigger. - console.info('[EntryFormAbility] onFormEvent'); - } - onRemoveForm(formId) { - // Delete widget data. - console.info('[EntryFormAbility] onRemoveForm'); - } - onConfigurationUpdate(config) { - console.info('[EntryFormAbility] nConfigurationUpdate, config:' + JSON.stringify(config)); - } - onAcquireFormState(want) { - return formInfo.FormState.READY; - } - } - ``` - -> **NOTE** -> -> FormExtensionAbility cannot reside in the background. Therefore, continuous tasks cannot be processed in the widget lifecycle callbacks. - -### Configuring the Widget Configuration File - -1. Configure ExtensionAbility information under **extensionAbilities** in the [module.json5 file](../quick-start/module-configuration-file.md). For a FormExtensionAbility, you must specify **metadata**. Specifically, set **name** to **ohos.extension.form** (fixed), and set **resource** to the index of the widget configuration information. - Example configuration: - - - ```json - { - "module": { - // ... 
- "extensionAbilities": [ - { - "name": "EntryFormAbility", - "srcEntrance": "./ets/entryformability/EntryFormAbility.ts", - "label": "$string:EntryFormAbility_label", - "description": "$string:EntryFormAbility_desc", - "type": "form", - "metadata": [ - { - "name": "ohos.extension.form", - "resource": "$profile:form_config" - } - ] - } - ] - } - } - ``` - -2. Configure the widget configuration information. In the **metadata** configuration item of FormExtensionAbility, you can specify the resource index of specific configuration information of the widget. For example, if resource is set to **$profile:form_config**, **form_config.json** in the **resources/base/profile/** directory of the development view is used as the profile configuration file of the widget. The following table describes the internal field structure. - **Table 1** Widget profile configuration file - - | Field| Description| Data Type| Initial Value Allowed| - | -------- | -------- | -------- | -------- | - | name | Class name of a widget. The value is a string with a maximum of 127 bytes.| String| No| - | description | Description of the widget. The value can be a string or a resource index to descriptions in multiple languages. The value is a string with a maximum of 255 bytes.| String| Yes (initial value: left empty)| - | src | Full path of the UI code corresponding to the widget.| String| No| - | window | Window-related configurations.| Object| Yes| - | isDefault | Whether the widget is a default one. Each ability has only one default widget.
**true**: The widget is the default one.
**false**: The widget is not the default one.| Boolean| No| - | colorMode | Color mode of the widget.
**auto**: The widget adopts the auto-adaptive color mode.
**dark**: The widget adopts the dark color mode.
**light**: The widget adopts the light color mode.| String| Yes (initial value: **auto**)| - | supportDimensions | Grid styles supported by the widget.
**1 * 2**: indicates a grid with one row and two columns.
**2 * 2**: indicates a grid with two rows and two columns.
**2 * 4**: indicates a grid with two rows and four columns.
**4 * 4**: indicates a grid with four rows and four columns.| String array| No| - | defaultDimension | Default grid style of the widget. The value must be available in the **supportDimensions** array of the widget.| String| No| - | updateEnabled | Whether the widget can be updated periodically.
**true**: The widget can be updated at a specified interval (**updateDuration**) or at the scheduled time (**scheduledUpdateTime**). **updateDuration** takes precedence over **scheduledUpdateTime**.
**false**: The widget cannot be updated periodically.| Boolean| No| - | scheduledUpdateTime | Scheduled time to update the widget. The value is in 24-hour format and accurate to minute.
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| String| Yes (initial value: **0:0**)| - | updateDuration | Interval to update the widget. The value is a natural number, in the unit of 30 minutes.
If the value is **0**, this field does not take effect.
If the value is a positive integer *N*, the interval is calculated by multiplying *N* and 30 minutes.
**updateDuration** takes precedence over **scheduledUpdateTime**. If both are specified, the value specified by **updateDuration** is used.| Number| Yes (initial value: **0**)| - | formConfigAbility | Link to a specific page of the application. The value is a URI.| String| Yes (initial value: left empty)| - | formVisibleNotify | Whether the widget is allowed to use the widget visibility notification.| String| Yes (initial value: left empty)| - | metaData | Metadata of the widget. This field contains the array of the **customizeData** field.| Object| Yes (initial value: left empty)| - - Example configuration: - - ```json - { - "forms": [ - { - "name": "widget", - "description": "This is a widget.", - "src": "./js/widget/pages/index/index", - "window": { - "designWidth": 720, - "autoDesignWidth": true - }, - "colorMode": "auto", - "isDefault": true, - "updateEnabled": true, - "scheduledUpdateTime": "10:30", - "updateDuration": 1, - "defaultDimension": "2*2", - "supportDimensions": [ - "2*2" - ] - } - ] - } - ``` - - -### Persistently Storing Widget Data - -A widget provider is usually started when it is needed to provide information about a widget. The Widget Manager supports multi-instance management and uses the widget ID to identify an instance. If the widget provider supports widget data modification, it must persistently store the data based on the widget ID, so that it can access the data of the target widget when obtaining, updating, or starting a widget. - - -```ts -const DATA_STORAGE_PATH = "/data/storage/el2/base/haps/form_store"; -async function storeFormInfo(formId: string, formName: string, tempFlag: boolean) { - // Only the widget ID (formId), widget name (formName), and whether the widget is a temporary one (tempFlag) are persistently stored. - let formInfo = { - "formName": formName, - "tempFlag": tempFlag, - "updateCount": 0 - }; - try { - const storage = await dataStorage.getStorage(DATA_STORAGE_PATH); - // Put the widget information. 
- await storage.put(formId, JSON.stringify(formInfo)); - console.info(`[EntryFormAbility] storeFormInfo, put form info successfully, formId: ${formId}`); - await storage.flush(); - } catch (err) { - console.error(`[EntryFormAbility] failed to storeFormInfo, err: ${JSON.stringify(err)}`); - } -} - -export default class EntryFormAbility extends FormExtension { - // ... - onAddForm(want) { - console.info('[EntryFormAbility] onAddForm'); - - let formId = want.parameters["ohos.extra.param.key.form_identity"]; - let formName = want.parameters["ohos.extra.param.key.form_name"]; - let tempFlag = want.parameters["ohos.extra.param.key.form_temporary"]; - // Persistently store widget data for subsequent use, such as instance acquisition and update. - // Implement this API based on project requirements. - storeFormInfo(formId, formName, tempFlag); - - let obj = { - "title": "titleOnCreate", - "detail": "detailOnCreate" - }; - let formData = formBindingData.createFormBindingData(obj); - return formData; - } -} -``` - -You should override **onRemoveForm** to implement widget data deletion. - - -```ts -const DATA_STORAGE_PATH = "/data/storage/el2/base/haps/form_store"; -async function deleteFormInfo(formId: string) { - try { - const storage = await dataStorage.getStorage(DATA_STORAGE_PATH); - // Delete the widget information. - await storage.delete(formId); - console.info(`[EntryFormAbility] deleteFormInfo, del form info successfully, formId: ${formId}`); - await storage.flush(); - } catch (err) { - console.error(`[EntryFormAbility] failed to deleteFormInfo, err: ${JSON.stringify(err)}`); - } -} - -// ... - -export default class EntryFormAbility extends FormExtension { - // ... - onRemoveForm(formId) { - console.info('[EntryFormAbility] onRemoveForm'); - // Delete the persistent widget instance data. - // Implement this API based on project requirements. 
- deleteFormInfo(formId); - } -} -``` - -For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md). - -The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary. - -- Normal widget: a widget persistently used by the widget host - -- Temporary widget: a widget temporarily used by the widget host - -Data of a temporary widget will be deleted on the Widget Manager if the widget framework is killed and restarted. The widget provider, however, is not notified of the deletion and still keeps the data. Therefore, the widget provider needs to clear the data of temporary widgets proactively if the data has been kept for a long period of time. If the widget host has converted a temporary widget into a normal one, the widget provider should change the widget data from temporary storage to persistent storage. Otherwise, the widget data may be deleted by mistake. - - -### Updating Widget Data - -When an application initiates a scheduled or periodic update, the application obtains the latest data and calls **updateForm()** to update the widget. - - -```ts -onUpdateForm(formId) { - // Override this method to support scheduled updates, periodic updates, or updates requested by the widget host. - console.info('[EntryFormAbility] onUpdateForm'); - let obj = { - "title": "titleOnUpdate", - "detail": "detailOnUpdate" - }; - let formData = formBindingData.createFormBindingData(obj); - // Call the updateForm() method to update the widget. Only the data passed through the input parameter is updated. Other information remains unchanged. 
- formProvider.updateForm(formId, formData).catch((error) => { - console.info('[EntryFormAbility] updateForm, error:' + JSON.stringify(error)); - }); -} -``` - - -### Developing the Widget UI Page - -You can use the web-like paradigm (HML+CSS+JSON) to develop JS widget pages. This section describes how to develop a page shown below. - -![widget-development-stage](figures/widget-development-stage.png) - -> **NOTE** -> -> Only the JavaScript-based web-like development paradigm is supported when developing the widget UI. - -- HML: uses web-like paradigm components to describe the widget page information. - - ```html -
- -
- -
-
- {{title}} - {{detail}} -
-
-
- ``` - -- CSS: defines style information about the web-like paradigm components in HML. - - ```css - .container { - flex-direction: column; - justify-content: center; - align-items: center; - } - - .bg-img { - flex-shrink: 0; - height: 100%; - } - - .container-inner { - flex-direction: column; - justify-content: flex-end; - align-items: flex-start; - height: 100%; - width: 100%; - padding: 12px; - } - - .title { - font-size: 19px; - font-weight: bold; - color: white; - text-overflow: ellipsis; - max-lines: 1; - } - - .detail_text { - font-size: 16px; - color: white; - opacity: 0.66; - text-overflow: ellipsis; - max-lines: 1; - margin-top: 6px; - } - ``` - -- JSON: defines data and event interaction on the widget UI page. - - ```json - { - "data": { - "title": "TitleDefault", - "detail": "TextDefault" - }, - "actions": { - "routerEvent": { - "action": "router", - "abilityName": "EntryAbility", - "params": { - "message": "add detail" - } - } - } - } - ``` - - -### Developing Widget Events - -You can set router and message events for components on a widget. The router event applies to ability redirection, and the message event applies to custom click events. - -The key steps are as follows: - -1. Set the **onclick** field in the HML file to **routerEvent** or **messageEvent**, depending on the **actions** settings in the JSON file. - -2. Set the router event. - - **action**: **"router"**, which indicates a router event. - - **abilityName**: name of the ability to redirect to (PageAbility component in the FA model and UIAbility component in the stage model). For example, the default UIAbility name of the stage model created by DevEco Studio is EntryAbility. - - **params**: custom parameters passed to the target ability. Set them as required. The value can be obtained from **parameters** in **want** used for starting the target ability. 
For example, in the lifecycle function **onCreate** of the main ability in the stage model, you can obtain **want** and its **parameters** field. - -3. Set the message event. - - **action**: **"message"**, which indicates a message event. - - **params**: custom parameters of the message event. Set them as required. The value can be obtained from **message** in the widget lifecycle function **onFormEvent()**. - -The following is an example: - -- HML file: - - ```html -
- -
- -
-
- {{title}} - {{detail}} -
-
-
- ``` - -- CSS file: - - ```css - .container { - flex-direction: column; - justify-content: center; - align-items: center; - } - - .bg-img { - flex-shrink: 0; - height: 100%; - } - - .container-inner { - flex-direction: column; - justify-content: flex-end; - align-items: flex-start; - height: 100%; - width: 100%; - padding: 12px; - } - - .title { - font-size: 19px; - font-weight: bold; - color: white; - text-overflow: ellipsis; - max-lines: 1; - } - - .detail_text { - font-size: 16px; - color: white; - opacity: 0.66; - text-overflow: ellipsis; - max-lines: 1; - margin-top: 6px; - } - ``` - -- JSON file: - - ```json - { - "data": { - "title": "TitleDefault", - "detail": "TextDefault" - }, - "actions": { - "routerEvent": { - "action": "router", - "abilityName": "EntryAbility", - "params": { - "info": "router info", - "message": "router message" - } - }, - "messageEvent": { - "action": "message", - "params": { - "detail": "message detail" - } - } - } - } - ``` - -- Receive the router event and obtain parameters in UIAbility. - - ```ts - import UIAbility from '@ohos.app.ability.UIAbility' - - export default class EntryAbility extends UIAbility { - onCreate(want, launchParam) { - // Obtain the info parameter passed in the router event. - if (want.parameters.info === "router info") { - // Do something. - // console.log("router info:" + want.parameters.info) - } - // Obtain the message parameter passed in the router event. - if (want.parameters.message === "router message") { - // Do something. - // console.log("router message:" + want.parameters.message) - } - } - // ... - }; - ``` - -- Receive the message event in FormExtensionAbility and obtain parameters. - - ```ts - import FormExtension from '@ohos.app.form.FormExtensionAbility'; - - export default class FormAbility extends FormExtension { - // ... - onFormEvent(formId, message) { - // Obtain the detail parameter passed in the message event. 
- let msg = JSON.parse(message) - if (msg.params.detail === "message detail") { - // Do something. - // console.log("message info:" + msg.params.detail) - } - } - // ... - }; - ``` - -## Restrictions - -To minimize the abuse of **FormExtensionAbility** by third-party applications, the following APIs cannot be invoked in **FormExtensionAbility**: - -- @ohos.ability.particleAbility.d.ts -- @ohos.backgroundTaskManager.d.ts -- @ohos.resourceschedule.backgroundTaskManager.d.ts -- @ohos.multimedia.camera.d.ts -- @ohos.multimedia.audio.d.ts -- @ohos.multimedia.media.d.ts diff --git a/en/application-dev/application-models/widget-switch.md b/en/application-dev/application-models/widget-switch.md index 8d9823385a8a05f71c742327dc966054427a6718..2f8a0a41b2e1b20824a19d08c98f67e39cd05df6 100644 --- a/en/application-dev/application-models/widget-switch.md +++ b/en/application-dev/application-models/widget-switch.md @@ -11,7 +11,7 @@ Widget switching involves the following parts: | Configuration Item | FA Model | Stage Model | | ---------------- | ------------------------------------------- | ------------------------------------------------------------ | | Configuration item location | **formAbility** and **forms** are in the **config.json** file.| **extensionAbilities** (configuration for **formExtensionAbility**) is in the **module.json5** file in the level-1 directory, and **forms** (configuration for **forms** contained in **formExtensionAbility**) is in the **form_config.json** file in the level-2 directory.| -| Widget code path | Specified by **srcPath**, without the file name. | Specified by **srcEntrance**, with the file name. | +| Widget code path | Specified by **srcPath**, without the file name. | Specified by **srcEntry**, with the file name. | | Programming language | **srcLanguage** can be set to **js** or **ets**. | This configuration item is unavailable. Only ets is supported. | | Whether to enable widgets | formsEnabled | This configuration item is unavailable. 
The setting of **type** set to **form** means that the widgets are enabled. | | Ability type | type: service | type: form | @@ -32,7 +32,7 @@ Figure 2 Widget configuration differences | Item| FA Model| Stage Model| | -------- | -------- | -------- | -| Entry file| **form.ts** in the directory pointed to by **srcPath**| File pointed to by **srcEntrance**| +| Entry file| **form.ts** in the directory pointed to by **srcPath**| File pointed to by **srcEntry**| | Lifecycle| export default| import FormExtension from '\@ohos.app.form.FormExtensionAbility';
export default class FormAbility extends FormExtension| diff --git a/en/application-dev/application-models/windowextensionability.md b/en/application-dev/application-models/windowextensionability.md index 069897ad02435070ac470f5d2d3d528c76b417e8..0f6e28b89790cfa1dd7dc471ed3a450280f19a4a 100644 --- a/en/application-dev/application-models/windowextensionability.md +++ b/en/application-dev/application-models/windowextensionability.md @@ -1,12 +1,13 @@ # WindowExtensionAbility + [WindowExtensionAbility](../reference/apis/js-apis-application-windowExtensionAbility.md) is a type of ExtensionAbility component that allows a system application to be embedded in and displayed over another application. The WindowExtensionAbility component must be used together with the [AbilityComponent](../reference/arkui-ts/ts-container-ability-component.md) to process services of the started application. WindowExtensionAbility is run in connection mode. A system application must use the AbilityComponent to start the WindowExtensionAbility component. Each ExtensionAbility has its own context. For WindowExtensionAbility, -the context is [WindowExtensionContext](../reference/apis/js-apis-inner-application-windowExtensionContext.md). +the context is [WindowExtensionContext](../reference/apis/js-apis-inner-application-windowExtensionContext.md). > **NOTE** > @@ -14,7 +15,7 @@ the context is [WindowExtensionContext](../reference/apis/js-apis-inner-applicat > -## Setting an Embedded Ability (for System Applications Only) +## Setting an Embedded UIAbility (for System Applications Only) The **WindowExtensionAbility** class provides **onConnect()**, **onDisconnect()**, and **onWindowReady()** lifecycle callbacks, which can be overridden. @@ -58,7 +59,7 @@ To implement an embedded application, manually create a WindowExtensionAbility i } ``` -4. Register the WindowExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the **Module** project. 
Set **type** to **"window"** and **srcEntrance** to the code path of the ExtensionAbility component. +4. Register the WindowExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the **Module** project. Set **type** to **"window"** and **srcEntry** to the code path of the ExtensionAbility component. ```json { @@ -66,11 +67,11 @@ To implement an embedded application, manually create a WindowExtensionAbility i "extensionAbilities": [ { "name": "WindowExtAbility", - "srcEntrance": "./ets/WindowExtAbility/WindowExtAbility.ts", + "srcEntry": "./ets/WindowExtAbility/WindowExtAbility.ts", "icon": "$media:icon", "description": "WindowExtension", "type": "window", - "visible": true, + "exported": true, } ], } @@ -78,7 +79,7 @@ To implement an embedded application, manually create a WindowExtensionAbility i ``` -## Starting an Embedded Ability (for System Applications Only) +## Starting an Embedded UIAbility (for System Applications Only) System applications can load the created WindowExtensionAbility through the AbilityComponent. @@ -90,23 +91,23 @@ System applications can load the created WindowExtensionAbility through the Abil 3. Set the width and height. 
The sample code is as follows: -```ts -@Entry -@Component -struct Index { - @State message: string = 'Hello World' - - build() { - Row() { - Column() { - AbilityComponent({ abilityName: "WindowExtAbility", bundleName: "com.example.WindowExtAbility"}) - .width(500) - .height(500) - } - .width('100%') - } - .height('100%') - .backgroundColor(0x64BB5c) - } -} -``` + ```ts + @Entry + @Component + struct Index { + @State message: string = 'Hello World' + + build() { + Row() { + Column() { + AbilityComponent({ abilityName: "WindowExtAbility", bundleName: "com.example.WindowExtAbility"}) + .width(500) + .height(500) + } + .width('100%') + } + .height('100%') + .backgroundColor(0x64BB5c) + } + } + ``` \ No newline at end of file diff --git a/en/application-dev/connectivity/Readme-EN.md b/en/application-dev/connectivity/Readme-EN.md index 7176cb8fb438cbe8beec5b36bdd290c0b01bbd1f..59df854e8a37289cc9dcf55ed2f7d8110a7c84d0 100755 --- a/en/application-dev/connectivity/Readme-EN.md +++ b/en/application-dev/connectivity/Readme-EN.md @@ -9,6 +9,7 @@ - [Network Sharing](net-sharing.md) - [Ethernet Connection](net-ethernet.md) - [Network Connection Management](net-connection-manager.md) + - [mDNS Management](net-mdns.md) - IPC & RPC - [IPC & RPC Overview](ipc-rpc-overview.md) - [IPC & RPC Development](ipc-rpc-development-guideline.md) diff --git a/en/application-dev/connectivity/http-request.md b/en/application-dev/connectivity/http-request.md index 6204682cde551e1d9a952d8e19716cf342ce2d3d..223e40a97ecddcfa6b1613106d94ba942ee22006 100644 --- a/en/application-dev/connectivity/http-request.md +++ b/en/application-dev/connectivity/http-request.md @@ -30,7 +30,7 @@ The following table provides only a simple description of the related APIs. For | on\('dataProgress'\)10+ | Registers an observer for events indicating progress of receiving HTTP streaming responses. 
| | off\('dataProgress'\)10+ | Unregisters the observer for events indicating progress of receiving HTTP streaming responses.| -## How to Develop +## How to Develop request APIs 1. Import the **http** namespace from **@ohos.net.http.d.ts**. 2. Call **createHttp()** to create an **HttpRequest** object. @@ -49,43 +49,114 @@ let httpRequest = http.createHttp(); // This API is used to listen for the HTTP Response Header event, which is returned earlier than the result of the HTTP request. It is up to you whether to listen for HTTP Response Header events. // on('headerReceive', AsyncCallback) is replaced by on('headersReceive', Callback) since API version 8. httpRequest.on('headersReceive', (header) => { - console.info('header: ' + JSON.stringify(header)); + console.info('header: ' + JSON.stringify(header)); }); httpRequest.request( - // Customize EXAMPLE_URL in extraData on your own. It is up to you whether to add parameters to the URL. - "EXAMPLE_URL", - { - method: http.RequestMethod.POST, // Optional. The default value is http.RequestMethod.GET. - // You can add header fields based on service requirements. - header: { - 'Content-Type': 'application/json' - }, - // This field is used to transfer data when the POST request is used. - extraData: { - "data": "data to send", - }, - expectDataType: http.HttpDataType.STRING, // Optional. This field specifies the type of the return data. - usingCache: true, // Optional. The default value is true. - priority: 1, // Optional. The default value is 1. - connectTimeout: 60000 // Optional. The default value is 60000, in ms. - readTimeout: 60000, // Optional. The default value is 60000, in ms. - usingProtocol: http.HttpProtocol.HTTP1_1, // Optional. The default protocol type is automatically specified by the system. - usingProxy: false, // Optional. By default, network proxy is not used. This field is supported since API 10. - }, (err, data) => { - if (!err) { - // data.result carries the HTTP response. 
Parse the response based on service requirements. - console.info('Result:' + JSON.stringify(data.result)); - console.info('code:' + JSON.stringify(data.responseCode)); - // data.header carries the HTTP response header. Parse the content based on service requirements. - console.info('header:' + JSON.stringify(data.header)); - console.info('cookies:' + JSON.stringify(data.cookies)); // 8+ - } else { - console.info('error:' + JSON.stringify(err)); - // Unsubscribe from HTTP Response Header events. - httpRequest.off('headersReceive'); - // Call the destroy() method to release resources after HttpRequest is complete. - httpRequest.destroy(); - } + // Customize EXAMPLE_URL in extraData on your own. It is up to you whether to add parameters to the URL. + { + method: http.RequestMethod.POST, // Optional. The default value is http.RequestMethod.GET. + // You can add header fields based on service requirements. + header: { + 'Content-Type': 'application/json' + }, + // This field is used to transfer data when the POST request is used. + extraData: { + "data": "data to send", + }, + expectDataType: http.HttpDataType.STRING, // Optional. This field specifies the type of the return data. + usingCache: true, // Optional. The default value is true. + priority: 1, // Optional. The default value is 1. + connectTimeout: 60000 // Optional. The default value is 60000, in ms. + readTimeout: 60000, // Optional. The default value is 60000, in ms. + usingProtocol: http.HttpProtocol.HTTP1_1, // Optional. The default protocol type is automatically specified by the system. + usingProxy: false, // Optional. By default, network proxy is not used. This field is supported since API 10. + }, (err, data) => { + if (!err) { + // data.result carries the HTTP response. Parse the response based on service requirements. + console.info('Result:' + JSON.stringify(data.result)); + console.info('code:' + JSON.stringify(data.responseCode)); + // data.header carries the HTTP response header. 
Parse the content based on service requirements. + console.info('header:' + JSON.stringify(data.header)); + console.info('cookies:' + JSON.stringify(data.cookies)); // 8+ + } else { + console.info('error:' + JSON.stringify(err)); + // Unsubscribe from HTTP Response Header events. + httpRequest.off('headersReceive'); + // Call the destroy() method to release resources after HttpRequest is complete. + httpRequest.destroy(); } + } ); ``` + +## How to Develop request2 APIs + +1. Import the **http** namespace from **@ohos.net.http.d.ts**. +2. Call **createHttp()** to create an **HttpRequest** object. +3. Depending on your need, call **on()** of the **HttpRequest** object to subscribe to HTTP response header events as well as events indicating receiving of HTTP streaming responses, progress of receiving HTTP streaming responses, and completion of receiving HTTP streaming responses. +4. Call **request2()** to initiate a network request. You need to pass in the URL and optional parameters of the HTTP request. +5. Parse the returned response code as needed. +6. Call **off()** of the **HttpRequest** object to unsubscribe from the related events. +7. Call **httpRequest.destroy()** to release resources after the request is processed. + +```js +// Import the http namespace. +import http from '@ohos.net.http' + +// Each httpRequest corresponds to an HTTP request task and cannot be reused. +let httpRequest = http.createHttp(); +// Subscribe to HTTP response header events. +httpRequest.on('headersReceive', (header) => { + console.info('header: ' + JSON.stringify(header)); +}); +// Subscribe to events indicating receiving of HTTP streaming responses. +let res = ''; +httpRequest.on('dataReceive', (data) => { + res += data; + console.info('res: ' + res); +}); +// Subscribe to events indicating completion of receiving HTTP streaming responses. 
+httpRequest.on('dataEnd', () => { + console.info('No more data in response, data receive end'); +}); +// Subscribe to events indicating progress of receiving HTTP streaming responses. +httpRequest.on('dataProgress', (data) => { + console.log("dataProgress receiveSize:" + data.receiveSize + ", totalSize:" + data.totalSize); +}); + +httpRequest.request2( + // Customize EXAMPLE_URL in extraData on your own. It is up to you whether to add parameters to the URL. + "EXAMPLE_URL", + { + method: http.RequestMethod.POST, // Optional. The default value is http.RequestMethod.GET. + // You can add header fields based on service requirements. + header: { + 'Content-Type': 'application/json' + }, + // This field is used to transfer data when the POST request is used. + extraData: { + "data": "data to send", + }, + expectDataType: http.HttpDataType.STRING, // Optional. This field specifies the type of the return data. + usingCache: true, // Optional. The default value is true. + priority: 1, // Optional. The default value is 1. + connectTimeout: 60000 // Optional. The default value is 60000, in ms. + readTimeout: 60000, // Optional. The default value is 60000, in ms. If a large amount of data needs to be transmitted, you are advised to set this parameter to a larger value to ensure normal data transmission. + usingProtocol: http.HttpProtocol.HTTP1_1, // Optional. The default protocol type is automatically specified by the system. + }, (err, data) => { + console.info('error:' + JSON.stringify(err)); + console.info('ResponseCode :' + JSON.stringify(data)); + // Unsubscribe from HTTP Response Header events. + httpRequest.off('headersReceive'); + // Unregister the observer for events indicating receiving of HTTP streaming responses. + httpRequest.off('dataReceive'); + // Unregister the observer for events indicating progress of receiving HTTP streaming responses. 
+ httpRequest.off('dataProgress'); + // Unregister the observer for events indicating completion of receiving HTTP streaming responses. + httpRequest.off('dataEnd'); + // Call the destroy() method to release resources after HttpRequest is complete. + httpRequest.destroy(); + } +); + +``` \ No newline at end of file diff --git a/en/application-dev/connectivity/net-connection-manager.md b/en/application-dev/connectivity/net-connection-manager.md index 1eddb3b5bbe47cb4d02123986647955d0492629e..69ce20a372c60c2bf0443db2697c5a65352bf34c 100644 --- a/en/application-dev/connectivity/net-connection-manager.md +++ b/en/application-dev/connectivity/net-connection-manager.md @@ -1,31 +1,38 @@ # Network Connection Management ## Introduction + The Network Connection Management module provides basic network management capabilities, including management of Wi-Fi/cellular/Ethernet connection priorities, network quality evaluation, subscription to network connection status changes, query of network connection information, and DNS resolution. > **NOTE** > To maximize the application running efficiency, most API calls are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see [sms API Reference](../reference/apis/js-apis-net-connection.md). ## Basic Concepts -- Producer: a provider of data networks, such as Wi-Fi, cellular, and Ethernet. -- Consumer: a user of data networks, for example, an application or a system service. -- Network probe: a mechanism used to detect the network availability to prevent the switch from an available network to an unavailable network. The probe type can be binding network detection, DNS detection, HTTP detection, or HTTPS detection. -- Network selection: a mechanism used to select the optimal network when multiple networks coexist. It is triggered when the network status, network information, or network quality evaluation score changes. 
+ +- Producer: a provider of data networks, such as Wi-Fi, cellular, and Ethernet. +- Consumer: a user of data networks, for example, an application or a system service. +- Network probe: a mechanism used to detect the network availability to prevent the switch from an available network to an unavailable network. The probe type can be binding network detection, DNS detection, HTTP detection, or HTTPS detection. +- Network selection: a mechanism used to select the optimal network when multiple networks coexist. It is triggered when the network status, network information, or network quality evaluation score changes. ## **Constraints** -- Programming language: C++ and JS -- System: Linux kernel -- The initial APIs of this module are supported since API version 8. Newly added APIs will be marked with a superscript to indicate their earliest API version. + +- Programming language: C++ and JS +- System: Linux kernel +- The initial APIs of this module are supported since API version 8. Newly added APIs will be marked with a superscript to indicate their earliest API version. ## When to Use + Typical application scenarios of network connection management are as follows: -- Subscribing to status changes of the specified network -- Obtaining the list of all registered networks -- Querying network connection information based on the data network -- Resolving the domain name of a network to obtain all IP addresses + +- Subscribing to status changes of the specified network +- Obtaining the list of all registered networks +- Querying network connection information based on the data network +- Resolving the domain name of a network to obtain all IP addresses The following describes the development procedure specific to each application scenario. + ## Available APIs + For the complete list of APIs and example code, see [Network Connection Management](../reference/apis/js-apis-net-connection.md). 
| Type| API| Description| @@ -75,44 +82,46 @@ For the complete list of APIs and example code, see [Network Connection Manageme ```js // Import the connection namespace. - import connection from '@ohos.net.connection' - - let netCap = { - // Assume that the default network is Wi-Fi. If you need to create a cellular network connection, set the network type to CELLULAR. - bearerTypes: [connection.NetBearType.BEARER_CELLULAR], - // Set the network capability to INTERNET. - networkCap: [connection.NetCap.NET_CAPABILITY_INTERNET], - }; - let netSpec = { - netCapabilities: netCap, - }; - - // Set the timeout value to 10s. The default value is 0. - let timeout = 10 * 1000; - - // Create a NetConnection object. - let conn = connection.createNetConnection(netSpec, timeout); - - // Listen to network status change events. If the network is available, an on_netAvailable event is returned. - conn.on('netAvailable', (data=> { - console.log("net is available, netId is " + data.netId); - })); - - // Listen to network status change events. If the network is unavailable, an on_netUnavailable event is returned. - conn.on('netUnavailable', (data=> { - console.log("net is unavailable, netId is " + data.netId); - })); - - // Register an observer for network status changes. - conn.register((err, data) => {}); - - // Unregister the observer for network status changes. - conn.unregister((err, data) => {}); +import connection from '@ohos.net.connection' + +let netCap = { + // Assume that the default network is Wi-Fi. If you need to create a cellular network connection, set the network type to CELLULAR. + bearerTypes: [connection.NetBearType.BEARER_CELLULAR], + // Set the network capability to INTERNET. + networkCap: [connection.NetCap.NET_CAPABILITY_INTERNET], +}; +let netSpec = { + netCapabilities: netCap, +}; + +// Set the timeout value to 10s. The default value is 0. +let timeout = 10 * 1000; + +// Create a NetConnection object. 
+let conn = connection.createNetConnection(netSpec, timeout); + +// Listen to network status change events. If the network is available, an on_netAvailable event is returned. +conn.on('netAvailable', (data => { + console.log("net is available, netId is " + data.netId); +})); + +// Listen to network status change events. If the network is unavailable, an on_netUnavailable event is returned. +conn.on('netUnavailable', (data => { + console.log("net is unavailable, netId is " + data.netId); +})); + +// Register an observer for network status changes. +conn.register((err, data) => { +}); + +// Unregister the observer for network status changes. +conn.unregister((err, data) => { +}); ``` -## Obtaining the List of All Registered Networks +## Obtaining the List of All Registered Networks -### How to Develop +### How to Develop 1. Import the connection namespace from **@ohos.net.connection.d.ts**. @@ -120,21 +129,21 @@ For the complete list of APIs and example code, see [Network Connection Manageme ```js // Import the connection namespace. - import connection from '@ohos.net.connection' - - // Obtain the list of all connected networks. - connection.getAllNets((err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - if (data) { - this.netList = data; - } - }) +import connection from '@ohos.net.connection' + +// Obtain the list of all connected networks. +connection.getAllNets((err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); + if (data) { + this.netList = data; + } +}) ``` -## Querying Network Capability Information and Connection Information of Specified Data Network +## Querying Network Capability Information and Connection Information of Specified Data Network -### How to Develop +### How to Develop 1. Import the connection namespace from **@ohos.net.connection.d.ts**. 
@@ -146,89 +155,89 @@ For the complete list of APIs and example code, see [Network Connection Manageme ```js // Import the connection namespace. - import connection from '@ohos.net.connection' - - // Call getDefaultNet to obtain the default data network specified by **NetHandle**. - connection.getDefaultNet((err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - if (data) { - this.netHandle = data; - } - }) - - // Obtain the network capability information of the data network specified by **NetHandle**. The capability information includes information such as the network type and specific network capabilities. - connection.getNetCapabilities(this.netHandle, (err, data) => { - console.log(JSON.stringify(err)); - - // Obtain the network type via bearerTypes. - for (let item of data.bearerTypes) { - if (item == 0) { - // Cellular network - console.log(JSON.stringify("BEARER_CELLULAR")); - } else if (item == 1) { - // Wi-Fi network - console.log(JSON.stringify("BEARER_WIFI")); - } else if (item == 3) { - // Ethernet network - console.log(JSON.stringify("BEARER_ETHERNET")); - } - } - - // Obtain the specific network capabilities via networkCap. - for (let item of data.networkCap) { - if (item == 0) { - // The network can connect to the carrier's Multimedia Messaging Service Center (MMSC) to send and receive multimedia messages. - console.log(JSON.stringify("NET_CAPABILITY_MMS")); - } else if (item == 11) { - // The network traffic is not metered. - console.log(JSON.stringify("NET_CAPABILITY_NOT_METERED")); - } else if (item == 12) { - // The network has the Internet access capability, which is set by the network provider. - console.log(JSON.stringify("NET_CAPABILITY_INTERNET")); - } else if (item == 15) { - // The network does not use a Virtual Private Network (VPN). 
- console.log(JSON.stringify("NET_CAPABILITY_NOT_VPN")); - } else if (item == 16) { - // The Internet access capability of the network is successfully verified by the connection management module. - console.log(JSON.stringify("NET_CAPABILITY_VALIDATED")); - } - } - }) - - // Obtain the connection information of the data network specified by NetHandle. Connection information includes link and route information. - connection.getConnectionProperties(this.netHandle, (err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - }) - - // Call getAllNets to obtain the list of all connected networks via Array. - connection.getAllNets((err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - if (data) { - this.netList = data; - } - }) - - for (let item of this.netList) { - // Obtain the network capability information of the network specified by each netHandle on the network list cyclically. - connection.getNetCapabilities(item, (err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - }) - - // Obtain the connection information of the network specified by each netHandle on the network list cyclically. - connection.getConnectionProperties(item, (err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - }) - } +import connection from '@ohos.net.connection' + +// Call getDefaultNet to obtain the default data network specified by **NetHandle**. +connection.getDefaultNet((err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); + if (data) { + this.netHandle = data; + } +}) + +// Obtain the network capability information of the data network specified by **NetHandle**. The capability information includes information such as the network type and specific network capabilities. 
+connection.getNetCapabilities(this.netHandle, (err, data) => { + console.log(JSON.stringify(err)); + + // Obtain the network type via bearerTypes. + for (let item of data.bearerTypes) { + if (item == 0) { + // Cellular network + console.log(JSON.stringify("BEARER_CELLULAR")); + } else if (item == 1) { + // Wi-Fi network + console.log(JSON.stringify("BEARER_WIFI")); + } else if (item == 3) { + // Ethernet network + console.log(JSON.stringify("BEARER_ETHERNET")); + } + } + + // Obtain the specific network capabilities via networkCap. + for (let item of data.networkCap) { + if (item == 0) { + // The network can connect to the carrier's Multimedia Messaging Service Center (MMSC) to send and receive multimedia messages. + console.log(JSON.stringify("NET_CAPABILITY_MMS")); + } else if (item == 11) { + // The network traffic is not metered. + console.log(JSON.stringify("NET_CAPABILITY_NOT_METERED")); + } else if (item == 12) { + // The network has the Internet access capability, which is set by the network provider. + console.log(JSON.stringify("NET_CAPABILITY_INTERNET")); + } else if (item == 15) { + // The network does not use a Virtual Private Network (VPN). + console.log(JSON.stringify("NET_CAPABILITY_NOT_VPN")); + } else if (item == 16) { + // The Internet access capability of the network is successfully verified by the connection management module. + console.log(JSON.stringify("NET_CAPABILITY_VALIDATED")); + } + } +}) + +// Obtain the connection information of the data network specified by NetHandle. Connection information includes link and route information. +connection.getConnectionProperties(this.netHandle, (err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); +}) + +// Call getAllNets to obtain the list of all connected networks via Array. 
+connection.getAllNets((err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); + if (data) { + this.netList = data; + } +}) + +for (let item of this.netList) { + // Obtain the network capability information of the network specified by each netHandle on the network list cyclically. + connection.getNetCapabilities(item, (err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); + }) + + // Obtain the connection information of the network specified by each netHandle on the network list cyclically. + connection.getConnectionProperties(item, (err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); + }) +} ``` -## Resolving the domain name of a network to obtain all IP addresses +## Resolving the domain name of a network to obtain all IP addresses -### How to Develop +### How to Develop 1. Import the connection namespace from **@ohos.net.connection.d.ts**. @@ -236,11 +245,11 @@ For the complete list of APIs and example code, see [Network Connection Manageme ```js // Import the connection namespace. - import connection from '@ohos.net.connection' +import connection from '@ohos.net.connection' - // Use the default network to resolve the host name to obtain the list of all IP addresses. - connection.getAddressesByName(this.host, (err, data) => { - console.log(JSON.stringify(err)); - console.log(JSON.stringify(data)); - }) +// Use the default network to resolve the host name to obtain the list of all IP addresses. 
+connection.getAddressesByName(this.host, (err, data) => { + console.log(JSON.stringify(err)); + console.log(JSON.stringify(data)); +}) ``` diff --git a/en/application-dev/connectivity/net-ethernet.md b/en/application-dev/connectivity/net-ethernet.md index 85c4ef4fc15f4c2228eb8351ddb5cd730ff5fe94..f1891594166c9ecf3688b93d78cdece890796d35 100644 --- a/en/application-dev/connectivity/net-ethernet.md +++ b/en/application-dev/connectivity/net-ethernet.md @@ -1,25 +1,29 @@ # Ethernet Connection ## Introduction -The Ethernet Connection module allows a device to access the Internet through a network cable. -After a device is connected to the Ethernet through a network cable, the device can obtain a series of network attributes, such as the dynamically allocated IP address, subnet mask, gateway, and DNS. You can manually configure and obtain the network attributes of the device in static mode. + +The Ethernet Connection module allows a device to access the Internet through a network cable. After a device is connected to the Ethernet through a network cable, the device can obtain a series of network attributes, such as the dynamically allocated IP address, subnet mask, gateway, and DNS. You can manually configure and obtain the network attributes of the device in static mode. > **NOTE** > To maximize the application running efficiency, most API calls are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see [sms API Reference](../reference/apis/js-apis-net-ethernet.md). ## **Constraints** -- Programming language: C++ and JS -- System: Linux kernel -- The initial APIs of this module are supported since API version 9. Newly added APIs will be marked with a superscript to indicate their earliest API version. + +- Programming language: C++ and JS +- System: Linux kernel +- The initial APIs of this module are supported since API version 9. 
Newly added APIs will be marked with a superscript to indicate their earliest API version. ## When to Use + Typical application scenarios of Ethernet connection are as follows: -- Dynamically assigning a series of network attributes, such as the IP address, subnet mask, gateway, and DNS in DHCP mode to enable network access -- Configuring a series of network attributes, such as the IP address, subnet mask, gateway, and DNS, in static mode to enable network access. + +- Dynamically assigning a series of network attributes, such as the IP address, subnet mask, gateway, and DNS in DHCP mode to enable network access +- Configuring a series of network attributes, such as the IP address, subnet mask, gateway, and DNS, in static mode to enable network access. The following describes the development procedure specific to each application scenario. ## Available APIs + For the complete list of APIs and example code, see [Ethernet Connection](../reference/apis/js-apis-net-ethernet.md). | Type| API| Description| @@ -28,6 +32,8 @@ For the complete list of APIs and example code, see [Ethernet Connection](../ref | ohos.net.ethernet | function getIfaceConfig(iface: string, callback: AsyncCallback\): void | Obtains the network attributes of the specified Ethernet network. This API uses an asynchronous callback to return the result.| | ohos.net.ethernet | function isIfaceActive(iface: string, callback: AsyncCallback\): void | Checks whether the specified network port is active. This API uses an asynchronous callback to return the result.| | ohos.net.ethernet | function getAllActiveIfaces(callback: AsyncCallback\>): void; | Obtains the list of all active network ports. 
This API uses an asynchronous callback to return the result.| +| ohos.net.ethernet | function on(type: 'interfaceStateChange', callback: Callback\<{ iface: string, active: boolean }\>): void; | Subscribes to interface state change events.| +| ohos.net.ethernet | function off(type: 'interfaceStateChange', callback?: Callback\<{ iface: string, active: boolean }\>): void; | Unsubscribes from interface state change events.| ## Ethernet Connection – DHCP Mode @@ -39,44 +45,45 @@ For the complete list of APIs and example code, see [Ethernet Connection](../ref ```js // Import the ethernet namespace from @ohos.net.ethernet. - import ethernet from '@ohos.net.ethernet' - - // Call getAllActiveIfaces to obtain the list of all active network ports. - ethernet.getAllActiveIfaces((error, data) => { - if (error) { - console.log("getAllActiveIfaces callback error = " + error); - } else { - console.log("getAllActiveIfaces callback data.length = " + data.length); - for (let i = 0; i < data.length; i++) { - console.log("getAllActiveIfaces callback = " + data[i]); - } - } - }); - - // Call isIfaceActive to check whether the specified network port is active. - ethernet.isIfaceActive("eth0", (error, data) => { - if (error) { - console.log("isIfaceActive callback error = " + error); - } else { - console.log("isIfaceActive callback = " + data); - } - }); - - // Call getIfaceConfig to obtain the network attributes of the specified Ethernet network. 
- ethernet.getIfaceConfig("eth0", (error, data) => { - if (error) { - console.log("getIfaceConfig callback error = " + error); - } else { - console.log("getIfaceConfig callback mode = " + data.mode); - console.log("getIfaceConfig callback ipAddr = " + data.ipAddr); - console.log("getIfaceConfig callback routeAddr = " + data.routeAddr); - console.log("getIfaceConfig callback gateAddr = " + data.gateAddr); - console.log("getIfaceConfig callback maskAddr = " + data.maskAddr); - console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr); - console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr); - } - }); +import ethernet from '@ohos.net.ethernet' + +// Call getAllActiveIfaces to obtain the list of all active network ports. +ethernet.getAllActiveIfaces((error, data) => { + if (error) { + console.log("getAllActiveIfaces callback error = " + error); + } else { + console.log("getAllActiveIfaces callback data.length = " + data.length); + for (let i = 0; i < data.length; i++) { + console.log("getAllActiveIfaces callback = " + data[i]); + } + } +}); + +// Call isIfaceActive to check whether the specified network port is active. +ethernet.isIfaceActive("eth0", (error, data) => { + if (error) { + console.log("isIfaceActive callback error = " + error); + } else { + console.log("isIfaceActive callback = " + data); + } +}); + +// Call getIfaceConfig to obtain the network attributes of the specified Ethernet network. 
+ethernet.getIfaceConfig("eth0", (error, data) => { + if (error) { + console.log("getIfaceConfig callback error = " + error); + } else { + console.log("getIfaceConfig callback mode = " + data.mode); + console.log("getIfaceConfig callback ipAddr = " + data.ipAddr); + console.log("getIfaceConfig callback routeAddr = " + data.routeAddr); + console.log("getIfaceConfig callback gateAddr = " + data.gateAddr); + console.log("getIfaceConfig callback maskAddr = " + data.maskAddr); + console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr); + console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr); + } +}); ``` + ## Ethernet Connection – Static Mode ### How to Develop @@ -90,51 +97,75 @@ For the complete list of APIs and example code, see [Ethernet Connection](../ref ```js // Import the ethernet namespace from @ohos.net.ethernet. - import ethernet from '@ohos.net.ethernet' - - // Call getAllActiveIfaces to obtain the list of all active network ports. - ethernet.getAllActiveIfaces((error, data) => { - if (error) { - console.log("getAllActiveIfaces callback error = " + error); - } else { - console.log("getAllActiveIfaces callback data.length = " + data.length); - for (let i = 0; i < data.length; i++) { - console.log("getAllActiveIfaces callback = " + data[i]); - } - } - }); - - // Call isIfaceActive to check whether the specified network port is active. - ethernet.isIfaceActive("eth0", (error, data) => { - if (error) { - console.log("isIfaceActive callback error = " + error); - } else { - console.log("isIfaceActive callback = " + data); - } - }); - - // Call setIfaceConfig to configure the network attributes of the specified Ethernet network. 
- ethernet.setIfaceConfig("eth0", {mode:ethernet.STATIC,ipAddr:"192.168.xx.xx", routeAddr:"192.168.xx.xx", - gateAddr:"192.168.xx.xx", maskAddr:"255.255.xx.xx", dnsAddr0:"1.1.xx.xx", dnsAddr1:"2.2.xx.xx"},(error) => { - if (error) { - console.log("setIfaceConfig callback error = " + error); - } else { - console.log("setIfaceConfig callback ok "); - } - }); - - // Call getIfaceConfig to obtain the network attributes of the specified Ethernet network. - ethernet.getIfaceConfig("eth0", (error, data) => { - if (error) { - console.log("getIfaceConfig callback error = " + error); - } else { - console.log("getIfaceConfig callback mode = " + data.mode); - console.log("getIfaceConfig callback ipAddr = " + data.ipAddr); - console.log("getIfaceConfig callback routeAddr = " + data.routeAddr); - console.log("getIfaceConfig callback gateAddr = " + data.gateAddr); - console.log("getIfaceConfig callback maskAddr = " + data.maskAddr); - console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr); - console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr); - } - }); +import ethernet from '@ohos.net.ethernet' + +// Call getAllActiveIfaces to obtain the list of all active network ports. +ethernet.getAllActiveIfaces((error, data) => { + if (error) { + console.log("getAllActiveIfaces callback error = " + error); + } else { + console.log("getAllActiveIfaces callback data.length = " + data.length); + for (let i = 0; i < data.length; i++) { + console.log("getAllActiveIfaces callback = " + data[i]); + } + } +}); + +// Call isIfaceActive to check whether the specified network port is active. +ethernet.isIfaceActive("eth0", (error, data) => { + if (error) { + console.log("isIfaceActive callback error = " + error); + } else { + console.log("isIfaceActive callback = " + data); + } +}); + +// Call setIfaceConfig to configure the network attributes of the specified Ethernet network. 
+ethernet.setIfaceConfig("eth0", { + mode: ethernet.STATIC, ipAddr: "192.168.xx.xx", routeAddr: "192.168.xx.xx", + gateAddr: "192.168.xx.xx", maskAddr: "255.255.xx.xx", dnsAddr0: "1.1.xx.xx", dnsAddr1: "2.2.xx.xx" +}, (error) => { + if (error) { + console.log("setIfaceConfig callback error = " + error); + } else { + console.log("setIfaceConfig callback ok "); + } +}); + +// Call getIfaceConfig to obtain the network attributes of the specified Ethernet network. +ethernet.getIfaceConfig("eth0", (error, data) => { + if (error) { + console.log("getIfaceConfig callback error = " + error); + } else { + console.log("getIfaceConfig callback mode = " + data.mode); + console.log("getIfaceConfig callback ipAddr = " + data.ipAddr); + console.log("getIfaceConfig callback routeAddr = " + data.routeAddr); + console.log("getIfaceConfig callback gateAddr = " + data.gateAddr); + console.log("getIfaceConfig callback maskAddr = " + data.maskAddr); + console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr); + console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr); + } +}); +``` + +## Subscribes the status change of network device interfaces. + +### How to Develop + +1. Import the **ethernet** namespace from **@ohos.net.ethernet**. +2. Call the **on()** method to subscribe to **interfaceStateChange** events. It is up to you whether to listen for **interfaceStateChange** events. +3. Check whether an **interfaceStateChange** event is triggered when the interface state changes. +4. Call the **off()** method to unsubscribe from **interfaceStateChange** events. + +```js + // Import the ethernet namespace from @ohos.net.ethernet. +import ethernet from '@ohos.net.ethernet' + +// Subscribe to interfaceStateChange events. +ethernet.on('interfaceStateChange', ((data) => { + console.log(JSON.stringify(data)); +})); + +// Unsubscribe from interfaceStateChange events. 
+ethernet.off('interfaceStateChange'); ``` diff --git a/en/application-dev/connectivity/net-mdns.md b/en/application-dev/connectivity/net-mdns.md new file mode 100644 index 0000000000000000000000000000000000000000..16aa29609d0826388b244a7daebbcb1f849ed27e --- /dev/null +++ b/en/application-dev/connectivity/net-mdns.md @@ -0,0 +1,156 @@ +# MDNS Management + +## Introduction + +Multicast DNS (mDNS) provides functions such as adding, removing, discovering, and resolving local services on a LAN. +- Local service: a service provider on a LAN, for example, a printer or scanner. + +Typical MDNS management scenarios include: + +- Managing local services on a LAN, such as adding, removing, and resolving local services. +- Discovering local services and listening to the status changes of local services of the specified type through the **DiscoveryService** object. + +> **NOTE** +> To maximize the application running efficiency, most API calls are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see [mDNS Management](../reference/apis/js-apis-net-mdns.md). + +The following describes the development procedure specific to each application scenario. + +## Available APIs + +For the complete list of APIs and example code, see [mDNS Management](../reference/apis/js-apis-net-mdns.md). + +| Type| API| Description| +| ---- | ---- | ---- | +| ohos.net.mdns | addLocalService(context: Context, serviceInfo: LocalServiceInfo, callback: AsyncCallback\): void | Adds an mDNS service. This API uses an asynchronous callback to return the result.| +| ohos.net.mdns | removeLocalService(context: Context, serviceInfo: LocalServiceInfo, callback: AsyncCallback\): void | Removes an mDNS service. 
This API uses an asynchronous callback to return the result.| +| ohos.net.mdns | createDiscoveryService(context: Context, serviceType: string): DiscoveryService | Creates a **DiscoveryService** object, which is used to discover mDNS services of the specified type.| +| ohos.net.mdns | resolveLocalService(context: Context, serviceInfo: LocalServiceInfo, callback: AsyncCallback\): void | Resolves an mDNS service. This API uses an asynchronous callback to return the result.| +| ohos.net.mdns.DiscoveryService | startSearchingMDNS(): void | Searches for mDNS services on the LAN.| +| ohos.net.mdns.DiscoveryService | stopSearchingMDNS(): void | Stops searching for mDNS services on the LAN.| +| ohos.net.mdns.DiscoveryService | on(type: 'discoveryStart', callback: Callback<{serviceInfo: LocalServiceInfo, errorCode?: MdnsError}>): void | Enables listening for **discoveryStart** events.| +| ohos.net.mdns.DiscoveryService | on(type: 'discoveryStop', callback: Callback<{serviceInfo: LocalServiceInfo, errorCode?: MdnsError}>): void | Enables listening for **discoveryStop** events.| +| ohos.net.mdns.DiscoveryService | on(type: 'serviceFound', callback: Callback\): void | Enables listening for **serviceFound** events.| +| ohos.net.mdns.DiscoveryService | on(type: 'serviceLost', callback: Callback\): void | Enables listening for **serviceLost** events.| + +## Managing Local Services + +1. Connect the device to the Wi-Fi network. +2. Import the **mdns** namespace from **@ohos.net.mdns**. +3. Call **addLocalService** to add a local service. +4. (Optional) Call **resolveLocalService** to resolve the local service for the IP address of the local network. +5. Call **removeLocalService** to remove the local service. + +```js +// Import the mdns namespace from @ohos.net.mdns. +import mdns from '@ohos.net.mdns' + +// Obtain the context of the FA model. 
+import featureAbility from '@ohos.ability.featureAbility'; +let context = featureAbility.getContext(); + +// Obtain the context of the stage model. +import UIAbility from '@ohos.app.ability.UIAbility'; +class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage){ + globalThis.context = this.context; + } +} +let context = globalThis.context; + +// Create a LocalService object. +let localServiceInfo = { + serviceType: "_print._tcp", + serviceName: "servicename", + port: 5555, + host: { + address: "10.14.**.***", + }, + serviceAttribute: [{ + key: "111", + value: [1] + }] +} + +// Call addLocalService to add a local service. +mdns.addLocalService(context, localServiceInfo, function (error, data) { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); + +// (Optional) Call resolveLocalService to resolve the local service. +mdns.resolveLocalService(context, localServiceInfo, function (error, data) { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); + +// Call removeLocalService to remove the local service. +mdns.removeLocalService(context, localServiceInfo, function (error, data) { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); +``` + +## Discovering Local Services + +1. Connect the device to the Wi-Fi network. +2. Import the **mdns** namespace from **@ohos.net.mdns**. +3. Create a **DiscoveryService** object, which is used to discover mDNS services of the specified type. +4. Subscribe to mDNS service discovery status changes. +5. Enable discovery of mDNS services on the LAN. +6. Stop searching for mDNS services on the LAN. + +```js +// Import the mdns namespace from @ohos.net.mdns. +import mdns from '@ohos.net.mdns' + +// Obtain the context of the FA model. +import featureAbility from '@ohos.ability.featureAbility'; +let context = featureAbility.getContext(); + +// Obtain the context of the stage model. 
+import UIAbility from '@ohos.app.ability.UIAbility'; +class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage){ + globalThis.context = this.context; + } +} +let context = globalThis.context; + +// Create a LocalService object. +let localServiceInfo = { + serviceType: "_print._tcp", + serviceName: "servicename", + port: 5555, + host: { + address: "10.14.**.***", + }, + serviceAttribute: [{ + key: "111", + value: [1] + }] +} + +// Create a DiscoveryService object, which is used to discover mDNS services of the specified type. +let serviceType = "_print._tcp"; +let discoveryService = mdns.createDiscoveryService(context, serviceType); + +// Subscribe to mDNS service discovery status changes. +discoveryService.on('discoveryStart', (data) => { + console.log(JSON.stringify(data)); +}); +discoveryService.on('discoveryStop', (data) => { + console.log(JSON.stringify(data)); +}); +discoveryService.on('serviceFound', (data) => { + console.log(JSON.stringify(data)); +}); +discoveryService.on('serviceLost', (data) => { + console.log(JSON.stringify(data)); +}); + +// Enable discovery of mDNS services on the LAN. +discoveryService.startSearchingMDNS(); + +// Stop searching for mDNS services on the LAN. +discoveryService.stopSearchingMDNS(); +``` diff --git a/en/application-dev/connectivity/net-mgmt-overview.md b/en/application-dev/connectivity/net-mgmt-overview.md index 0ad30c35cc9b4d5e90b2c8fe90cac7ca2e413a57..043d41768f89dd851839eae893b7ba4409395f5e 100644 --- a/en/application-dev/connectivity/net-mgmt-overview.md +++ b/en/application-dev/connectivity/net-mgmt-overview.md @@ -2,13 +2,14 @@ Network management functions include: -- [HTTP data request](http-request.md): Initiates a data request through HTTP. -- [WebSocket connection](websocket-connection.md): Establishes a bidirectional connection between the server and client through WebSocket. -- [Socket connection](socket-connection.md): Transmits data through Socket. 
-- [Network policy management](net-policy-management.md): Restricts network capabilities by setting network policies, including cellular network policy, sleep/power-saving mode policy, and background network policy, and resets network policies as needed. -- [Network sharing](net-sharing.md): Shares a device's Internet connection with other connected devices by means of Wi-Fi hotspot, Bluetooth, and USB sharing, and queries the network sharing state and shared mobile data volume. -- [Ethernet connection](net-ethernet.md): Provides wired network capabilities, which allow you to set the IP address, subnet mask, gateway, and Domain Name System (DNS) server of a wired network. -- [Network connection management](net-connection-manager.md): Provides basic network management capabilities, including management of Wi-Fi/cellular/Ethernet connection priorities, network quality evaluation, subscription to network connection status changes, query of network connection information, and DNS resolution. +- [HTTP data request](http-request.md): initiates a data request through HTTP. +- [WebSocket connection](websocket-connection.md): establishes a bidirectional connection between the server and client through WebSocket. +- [Socket connection](socket-connection.md): transmits data through Socket. +- [Network policy management](net-policy-management.md): restricts network capabilities by setting network policies, including cellular network policy, sleep/power-saving mode policy, and background network policy, and resets network policies as needed. +- [Network sharing](net-sharing.md): shares a device's Internet connection with other connected devices by means of Wi-Fi hotspot, Bluetooth, and USB sharing, and queries the network sharing state and shared mobile data volume. +- [Ethernet connection](net-ethernet.md): provides wired network capabilities, which allow you to set the IP address, subnet mask, gateway, and Domain Name System (DNS) server of a wired network. 
+- [Network connection management](net-connection-manager.md): provides basic network management capabilities, including management of Wi-Fi/cellular/Ethernet connection priorities, network quality evaluation, subscription to network connection status changes, query of network connection information, and DNS resolution. +- [mDNS management](net-mdns.md): provides Multicast DNS (mDNS) management capabilities, such as adding, removing, discovering, and resolving local services on a LAN. ## Constraints diff --git a/en/application-dev/connectivity/net-sharing.md b/en/application-dev/connectivity/net-sharing.md index d5bc9cf2f8817723f0f23d666c45997a6735f706..331ffec3b1a1e0047c39e2fe416ad5c05e913b61 100644 --- a/en/application-dev/connectivity/net-sharing.md +++ b/en/application-dev/connectivity/net-sharing.md @@ -1,29 +1,36 @@ # Network Sharing ## Introduction + The Network Sharing module allows you to share your device's Internet connection with other connected devices by means of Wi-Fi hotspot, Bluetooth, and USB sharing. It also allows you to query the network sharing state and shared mobile data volume. > **NOTE** > To maximize the application running efficiency, most API calls are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see [sms API Reference](../reference/apis/js-apis-net-sharing.md). ## Basic Concepts -- Wi-Fi sharing: Shares the network through a Wi-Fi hotspot. -- Bluetooth sharing: Shares the network through Bluetooth. -- USB tethering: Shares the network using a USB flash drive. + +- Wi-Fi sharing: Shares the network through a Wi-Fi hotspot. +- Bluetooth sharing: Shares the network through Bluetooth. +- USB tethering: Shares the network using a USB flash drive. ## **Constraints** -- Programming language: C++ and JS -- System: Linux kernel -- The initial APIs of this module are supported since API version 9. 
Newly added APIs will be marked with a superscript to indicate their earliest API version. + +- Programming language: C++ and JS +- System: Linux kernel +- The initial APIs of this module are supported since API version 9. Newly added APIs will be marked with a superscript to indicate their earliest API version. ## When to Use + Typical network sharing scenarios are as follows: -- Enabling network sharing -- Disabling network sharing -- Obtaining the data traffic of the shared network + +- Enabling network sharing +- Disabling network sharing +- Obtaining the data traffic of the shared network The following describes the development procedure specific to each application scenario. + ## Available APIs + For the complete list of APIs and example code, see [Network Sharing](../reference/apis/js-apis-net-sharing.md). | Type| API| Description| @@ -54,18 +61,18 @@ For the complete list of APIs and example code, see [Network Sharing](../referen ```js // Import the sharing namespace from @ohos.net.sharing. - import sharing from '@ohos.net.sharing' - - // Subscribe to network sharing state changes. - sharing.on('sharingStateChange', (error, data) => { - console.log(JSON.stringify(error)); - console.log(JSON.stringify(data)); - }); - - // Call startSharing to start network sharing of the specified type. - sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { - console.log(JSON.stringify(error)); - }); +import sharing from '@ohos.net.sharing' + +// Subscribe to network sharing state changes. +sharing.on('sharingStateChange', (error, data) => { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); + +// Call startSharing to start network sharing of the specified type. 
+sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { + console.log(JSON.stringify(error)); +}); ``` ## Disabling network sharing @@ -79,18 +86,18 @@ For the complete list of APIs and example code, see [Network Sharing](../referen ```js // Import the sharing namespace from @ohos.net.sharing. - import sharing from '@ohos.net.sharing' - - // Subscribe to network sharing state changes. - sharing.on('sharingStateChange', (error, data) => { - console.log(JSON.stringify(error)); - console.log(JSON.stringify(data)); - }); - - // Call stopSharing to stop network sharing of the specified type. - sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { - console.log(JSON.stringify(error)); - }); +import sharing from '@ohos.net.sharing' + +// Subscribe to network sharing state changes. +sharing.on('sharingStateChange', (error, data) => { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); + +// Call stopSharing to stop network sharing of the specified type. +sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { + console.log(JSON.stringify(error)); +}); ``` ## Obtaining the data traffic of the shared network @@ -104,27 +111,27 @@ For the complete list of APIs and example code, see [Network Sharing](../referen ```js // Import the sharing namespace from @ohos.net.sharing. - import sharing from '@ohos.net.sharing' - - // Call startSharing to start network sharing of the specified type. - sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { - console.log(JSON.stringify(error)); - }); - - // Call getStatsTotalBytes to obtain the data traffic generated during data sharing. - sharing.getStatsTotalBytes((error, data) => { - console.log(JSON.stringify(error)); - console.log(JSON.stringify(data)); - }); - - // Call stopSharing to stop network sharing of the specified type and clear the data volume of network sharing. 
- sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { - console.log(JSON.stringify(error)); - }); - - // Call getStatsTotalBytes again. The data volume of network sharing has been cleared. - sharing.getStatsTotalBytes((error, data) => { - console.log(JSON.stringify(error)); - console.log(JSON.stringify(data)); - }); +import sharing from '@ohos.net.sharing' + +// Call startSharing to start network sharing of the specified type. +sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { + console.log(JSON.stringify(error)); +}); + +// Call getStatsTotalBytes to obtain the data traffic generated during data sharing. +sharing.getStatsTotalBytes((error, data) => { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); + +// Call stopSharing to stop network sharing of the specified type and clear the data volume of network sharing. +sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => { + console.log(JSON.stringify(error)); +}); + +// Call getStatsTotalBytes again. The data volume of network sharing has been cleared. +sharing.getStatsTotalBytes((error, data) => { + console.log(JSON.stringify(error)); + console.log(JSON.stringify(data)); +}); ``` diff --git a/en/application-dev/connectivity/socket-connection.md b/en/application-dev/connectivity/socket-connection.md index 5cae73b2a5c84f280aea80e299605ee80ac2553a..ea2a3ba6dcfb849ea64971503e246e94b16f34a1 100644 --- a/en/application-dev/connectivity/socket-connection.md +++ b/en/application-dev/connectivity/socket-connection.md @@ -186,136 +186,136 @@ TLS Socket connection process on the client: ```js import socket from '@ohos.net.socket' - // Create a TLS Socket connection (for two-way authentication). - let tlsTwoWay = socket.constructTLSSocketInstance(); - - // Subscribe to TLS Socket connection events. 
- tlsTwoWay.on('message', value => { - console.log("on message") - let buffer = value.message - let dataView = new DataView(buffer) - let str = "" - for (let i = 0; i < dataView.byteLength; ++i) { - str += String.fromCharCode(dataView.getUint8(i)) - } - console.log("on connect received:" + str) - }); - tlsTwoWay.on('connect', () => { - console.log("on connect") - }); - tlsTwoWay.on('close', () => { - console.log("on close") - }); - - // Bind the local IP address and port number. - tlsTwoWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => { - if (err) { - console.log('bind fail'); - return; - } - console.log('bind success'); - }); - - // Set the communication parameters. - let options = { - ALPNProtocols: ["spdy/1", "http/1.1"], - - // Set up a connection to the specified IP address and port number. - address: { - address: "192.168.xx.xxx", - port: xxxx, // Port - family: 1, - }, - - // Set the parameters used for authentication during communication. - secureOptions: { - key: "xxxx", // Key - cert: "xxxx", // Digital certificate - ca: ["xxxx"], // CA certificate - passwd: "xxxx", // Password for generating the key - protocols: [socket.Protocol.TLSv12], // Communication protocol - useRemoteCipherPrefer: true, // Whether to preferentially use the peer cipher suite - signatureAlgorithms: "rsa_pss_rsae_sha256:ECDSA+SHA256", // Signature algorithm - cipherSuite: "AES256-SHA256", // Cipher suite - }, - }; - - // Set up a connection. - tlsTwoWay.connect(options, (err, data) => { - console.error(err); - console.log(data); - }); - - // Enable the TCP Socket connection to be automatically closed after use. Then, disable listening for TCP Socket connection events. - tlsTwoWay.close((err) => { - if (err) { - console.log("close callback error = " + err); - } else { - console.log("close success"); - } - tlsTwoWay.off('message'); - tlsTwoWay.off('connect'); - tlsTwoWay.off('close'); - }); - - // Create a TLS Socket connection (for one-way authentication). 
- let tlsOneWay = socket.constructTLSSocketInstance(); // One way authentication - - // Subscribe to TLS Socket connection events. - tlsTwoWay.on('message', value => { - console.log("on message") - let buffer = value.message - let dataView = new DataView(buffer) - let str = "" - for (let i = 0;i < dataView.byteLength; ++i) { - str += String.fromCharCode(dataView.getUint8(i)) - } - console.log("on connect received:" + str) - }); - tlsTwoWay.on('connect', () => { - console.log("on connect") - }); - tlsTwoWay.on('close', () => { - console.log("on close") - }); - - // Bind the local IP address and port number. - tlsOneWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => { - if (err) { - console.log('bind fail'); - return; - } - console.log('bind success'); - }); - - // Set the communication parameters. - let oneWayOptions = { - address: { - address: "192.168.xxx.xxx", - port: xxxx, - family: 1, - }, - secureOptions: { - ca: ["xxxx","xxxx"], // CA certificate - cipherSuite: "AES256-SHA256", // Cipher suite - }, - }; - - // Set up a connection. - tlsOneWay.connect(oneWayOptions, (err, data) => { - console.error(err); - console.log(data); - }); - - // Enable the TCP Socket connection to be automatically closed after use. Then, disable listening for TCP Socket connection events. - tlsTwoWay.close((err) => { - if (err) { - console.log("close callback error = " + err); - } else { - console.log("close success"); - } - tlsTwoWay.off('message'); - tlsTwoWay.off('connect'); - tlsTwoWay.off('close'); - }); -``` +// Create a TLS Socket connection (for two-way authentication). +let tlsTwoWay = socket.constructTLSSocketInstance(); + +// Subscribe to TLS Socket connection events. 
+tlsTwoWay.on('message', value => { + console.log("on message") + let buffer = value.message + let dataView = new DataView(buffer) + let str = "" + for (let i = 0; i < dataView.byteLength; ++i) { + str += String.fromCharCode(dataView.getUint8(i)) + } + console.log("on connect received:" + str) +}); +tlsTwoWay.on('connect', () => { + console.log("on connect") +}); +tlsTwoWay.on('close', () => { + console.log("on close") +}); + +// Bind the local IP address and port number. +tlsTwoWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => { + if (err) { + console.log('bind fail'); + return; + } + console.log('bind success'); +}); + +// Set the communication parameters. +let options = { + ALPNProtocols: ["spdy/1", "http/1.1"], + + // Set up a connection to the specified IP address and port number. + address: { + address: "192.168.xx.xxx", + port: xxxx, // Port + family: 1, + }, + + // Set the parameters used for authentication during communication. + secureOptions: { + key: "xxxx", // Key + cert: "xxxx", // Digital certificate + ca: ["xxxx"], // CA certificate + passwd: "xxxx", // Password for generating the key + protocols: [socket.Protocol.TLSv12], // Communication protocol + useRemoteCipherPrefer: true, // Whether to preferentially use the peer cipher suite + signatureAlgorithms: "rsa_pss_rsae_sha256:ECDSA+SHA256", // Signature algorithm + cipherSuite: "AES256-SHA256", // Cipher suite + }, +}; + +// Set up a connection. +tlsTwoWay.connect(options, (err, data) => { + console.error(err); + console.log(data); +}); + +// Enable the TCP Socket connection to be automatically closed after use. Then, disable listening for TCP Socket connection events. +tlsTwoWay.close((err) => { + if (err) { + console.log("close callback error = " + err); + } else { + console.log("close success"); + } + tlsTwoWay.off('message'); + tlsTwoWay.off('connect'); + tlsTwoWay.off('close'); +}); + +// Create a TLS Socket connection (for one-way authentication). 
+let tlsOneWay = socket.constructTLSSocketInstance(); // One way authentication + +// Subscribe to TLS Socket connection events. +tlsTwoWay.on('message', value => { + console.log("on message") + let buffer = value.message + let dataView = new DataView(buffer) + let str = "" + for (let i = 0; i < dataView.byteLength; ++i) { + str += String.fromCharCode(dataView.getUint8(i)) + } + console.log("on connect received:" + str) +}); +tlsTwoWay.on('connect', () => { + console.log("on connect") +}); +tlsTwoWay.on('close', () => { + console.log("on close") +}); + +// Bind the local IP address and port number. +tlsOneWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => { + if (err) { + console.log('bind fail'); + return; + } + console.log('bind success'); +}); + +// Set the communication parameters. +let oneWayOptions = { + address: { + address: "192.168.xxx.xxx", + port: xxxx, + family: 1, + }, + secureOptions: { + ca: ["xxxx","xxxx"], // CA certificate + cipherSuite: "AES256-SHA256", // Cipher suite + }, +}; + +// Set up a connection. +tlsOneWay.connect(oneWayOptions, (err, data) => { + console.error(err); + console.log(data); +}); + +// Enable the TCP Socket connection to be automatically closed after use. Then, disable listening for TCP Socket connection events. 
+tlsTwoWay.close((err) => { + if (err) { + console.log("close callback error = " + err); + } else { + console.log("close success"); + } + tlsTwoWay.off('message'); + tlsTwoWay.off('connect'); + tlsTwoWay.off('close'); +}); +``` \ No newline at end of file diff --git a/en/application-dev/connectivity/websocket-connection.md b/en/application-dev/connectivity/websocket-connection.md index b68d537fc5ad96f3ab60b53e2e75a96d7b555f76..dfcc9bd7f877393bf1bf8d868046d5d12e896678 100644 --- a/en/application-dev/connectivity/websocket-connection.md +++ b/en/application-dev/connectivity/websocket-connection.md @@ -1,45 +1,42 @@ # WebSocket Connection - -## Use Cases +## When to Use You can use WebSocket to establish a bidirectional connection between a server and a client. Before doing this, you need to use the **createWebSocket()** API to create a **WebSocket** object and then use the **connect()** API to connect to the server. If the connection is successful, the client will receive a callback of the **open** event. Then, the client can communicate with the server using the **send()** API. When the server sends a message to the client, the client will receive a callback of the **message** event. If the client no longer needs this connection, it can call the **close()** API to disconnect from the server. Then, the client will receive a callback of the **close** event. If an error occurs in any of the preceding processes, the client will receive a callback of the **error** event. - ## Available APIs The WebSocket connection function is mainly implemented by the WebSocket module. To use related APIs, you must declare the **ohos.permission.INTERNET** permission. The following table describes the related APIs. -| API | Description | +| API| Description| | -------- | -------- | -| createWebSocket() | Creates a WebSocket connection. | -| connect() | Establishes a WebSocket connection to a given URL. | -| send() | Sends data through the WebSocket connection. 
| -| close() | Closes a WebSocket connection. | -| on(type: 'open') | Enables listening for **open** events of a WebSocket connection. | -| off(type: 'open') | Disables listening for **open** events of a WebSocket connection. | -| on(type: 'message') | Enables listening for **message** events of a WebSocket connection. | -| off(type: 'message') | Disables listening for **message** events of a WebSocket connection. | -| on(type: 'close') | Enables listening for **close** events of a WebSocket connection. | -| off(type: 'close') | Disables listening for **close** events of a WebSocket connection. | -| on(type: 'error') | Enables listening for **error** events of a WebSocket connection. | -| off(type: 'error') | Disables listening for **error** events of a WebSocket connection. | - +| createWebSocket() | Creates a WebSocket connection.| +| connect() | Establishes a WebSocket connection to a given URL.| +| send() | Sends data through the WebSocket connection.| +| close() | Closes a WebSocket connection.| +| on(type: 'open') | Enables listening for **open** events of a WebSocket connection.| +| off(type: 'open') | Disables listening for **open** events of a WebSocket connection.| +| on(type: 'message') | Enables listening for **message** events of a WebSocket connection.| +| off(type: 'message') | Disables listening for **message** events of a WebSocket connection.| +| on(type: 'close') | Enables listening for **close** events of a WebSocket connection.| +| off(type: 'close') | Disables listening for **close** events of a WebSocket connection.| +| on(type: 'error') | Enables listening for **error** events of a WebSocket connection.| +| off(type: 'error') | Disables listening for **error** events of a WebSocket connection.| ## How to Develop -1. Import the required WebSocket module. +1. Import the required webSocket module. 2. Create a **WebSocket** object. -3. (Optional) Subscribe to WebSocket open, message, close, and error events. +3. 
(Optional) Subscribe to WebSocket **open**, **message**, **close**, and **error** events. 4. Establish a WebSocket connection to a given URL. 5. Close the WebSocket connection if it is no longer needed. - + ```js import webSocket from '@ohos.net.webSocket'; diff --git a/en/application-dev/database/Readme-EN.md b/en/application-dev/database/Readme-EN.md index a8aff91550cb265137a89d8718f6232c34e1aa43..77e1d8f9738d949ce9b0f0396bf66f99b9bf924e 100644 --- a/en/application-dev/database/Readme-EN.md +++ b/en/application-dev/database/Readme-EN.md @@ -1,21 +1,22 @@ # Data Management -- Distributed Data Service - - [Distributed Data Service Overview](database-mdds-overview.md) - - [Distributed Data Service Development](database-mdds-guidelines.md) - -- Relational Database - - [RDB Overview](database-relational-overview.md) - - [RDB Development](database-relational-guidelines.md) - -- Preferences - - [Preferences Overview](database-preference-overview.md) - - [Preferences Development](database-preference-guidelines.md) - -- Distributed Data Object - - [Distributed Data Object Overview](database-distributedobject-overview.md) - - [Distributed Data Object Development](database-distributedobject-guidelines.md) - -- Data Share - - [DataShare Overview](database-datashare-overview.md) - - [DataShare Development](database-datashare-guidelines.md) +- [Data Management Overview](data-mgmt-overview.md) +- Application Data Persistence + - [Overview of Application Data Persistence](app-data-persistence-overview.md) + - [Persisting Preferences Data](data-persistence-by-preferences.md) + - [Persisting KV Store Data](data-persistence-by-kv-store.md) + - [Persisting RDB Store Data](data-persistence-by-rdb-store.md) +- Distributed Application Data Synchronization + - [Distributed Application Data Synchronization Overview](sync-app-data-across-devices-overview.md) + - [Cross-Device Synchronization of KV Stores](data-sync-of-kv-store.md) + - [Cross-Device Synchronization of RDB 
Stores](data-sync-of-rdb-store.md) + - [Cross-Device Synchronization of Distributed Data Objects](data-sync-of-distributed-data-object.md) +- Data Reliability and Security + - [Data Reliability and Security Overview](data-reliability-security-overview.md) + - [Database Backup and Restoration](data-backup-and-restore.md) + - [Database Encryption](data-encryption.md) + - [Access Control by Device and Data Level](access-control-by-device-and-data-level.md) +- Cross-Application Data Sharing (for System Applications Only) + - [Cross-Application Data Sharing Overview](share-device-data-across-apps-overview.md) + - [Sharing Data Using DataShareExtensionAbility](share-data-by-datashareextensionability.md) + - [Sharing Data in Silent Access](share-data-by-silent-access.md) diff --git a/en/application-dev/database/access-control-by-device-and-data-level.md b/en/application-dev/database/access-control-by-device-and-data-level.md new file mode 100644 index 0000000000000000000000000000000000000000..16ed5d5988636fc7e669cb1cba5a14d56ef7d67c --- /dev/null +++ b/en/application-dev/database/access-control-by-device-and-data-level.md @@ -0,0 +1,122 @@ +# Access Control by Device and Data Level + + +## Basic Concepts + +Distributed data management implements access control based on data security labels and device security levels. + +A higher data security label and device security level indicate stricter encryption and access control measures and higher data security. + + +### Data Security Labels + +The data can be rated into four security levels: S1, S2, S3, and S4. 
+ + | Risk Level| Security Level| Definition| Example| +| -------- | -------- | -------- | -------- | +| Critical| S4 | Special data types defined by industry laws and regulations, involving the most private individual information or data that may cause significant adverse impact on an individual or group once disclosed, tampered with, corrupted, or destroyed.| Political opinions, religious and philosophical belief, trade union membership, genetic data, biological information, health and sexual life status, sexual orientation, device authentication, and personal credit card information| +| High| S3 | Data that may cause critical adverse impact on an individual or group once disclosed, tampered with, corrupted, or destroyed.| Individual real-time precise positioning information and movement trajectory| +| Moderate| S2 | Data that may cause major adverse impact on an individual or group once disclosed, tampered with, corrupted, or destroyed.| Detailed addresses and nicknames of individuals| +| Low| S1 | Data that may cause minor adverse impact on an individual or group once disclosed, tampered with, corrupted, or destroyed.| Gender, nationality, and user application records| + + +### Device Security Levels + +Device security levels are classified into SL1 to SL5 based on devices' security capabilities, for example, whether a Trusted Execution Environment (TEE) or a secure storage chip is available. For example, the development boards RK3568 and Hi3516 are SL1 (lower security) devices, and tablets are SL4 (higher security) devices. + +During device networking, you can run the **hidumper -s 3511** command to query the device security level. 
The following example shows how to query the security level of the RK3568 board: + +![en-us_image_0000001542496993](figures/en-us_image_0000001542496993.png) + + +## Access Control Mechanism in Cross-Device Synchronization + +In cross-device data synchronization, data access is controlled based on the device security level and data security labels. In principle, data can be synchronized only to the devices whose data security labels are not higher than the device's security level. The access control matrix is as follows: + +|Device Security Level|Data Security Labels of the Synchornizable Device| +|---|---| +|SL1|S1| +|SL2|S1 to S2| +|SL3|S1 to S3| +|SL4|S1 to S4| +|SL5|S1 to S4| + +For example, the security level of development boards RK3568 and Hi3516 is SL1. The database with data security label S1 can be synchronized with RK3568 and Hi3516, but the databases with database labels S2-S4 cannot. + + +## When to Use + +The access control mechanism ensures secure data storage and synchronization across devices. When creating a database, you need to correctly set the security level for the database. + + +## Setting the Security Level for a KV Store + +When a KV store is created, the **securityLevel** parameter specifies the security level of the KV store. The following example shows how to create a KV store with security level of S1. + +For details about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). + + + +```js +import distributedKVStore from '@ohos.data.distributedKVStore'; + +let kvManager; +let context = getContext(this); +const kvManagerConfig = { + context: context, + bundleName: 'com.example.datamanagertest' +} +try { + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); +} catch (e) { + console.error(`Failed to create KVManager. 
Code:${e.code},message:${e.message}`); +} +let kvStore; +try { + const options = { + createIfMissing: true, + encrypt: true, + backup: false, + autoSync: true, + kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, + securityLevel: distributedKVStore.SecurityLevel.S1 + }; + kvManager.getKVStore('storeId', options, (err, store) => { + if (err) { + console.error(`Failed to get KVStore. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting KVStore.'); + kvStore = store; + }); +} catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); +} +``` + + +## Setting the Security Level for an RDB Store + +When an RDB store is created, the **securityLevel** parameter specifies the security level of the RDB store. The following example shows how to create an RDB store with security level of S1. + +For details about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). + + + +```js +import relationalStore from '@ohos.data.relationalStore'; + +let store; +const STORE_CONFIG = { + name: 'RdbTest.db', + securityLevel: relationalStore.SecurityLevel.S1 +}; +let promise = relationalStore.getRdbStore(this.context, STORE_CONFIG); +promise.then(async (rdbStore) => { + store = rdbStore; + console.info('Succeeded in getting RdbStore.') +}).catch((err) => { + console.error(`Failed to get RdbStore. Code:${err.code},message:${err.message}`); +}) +``` diff --git a/en/application-dev/database/app-data-persistence-overview.md b/en/application-dev/database/app-data-persistence-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..99690708daa2e14ae66566907ff28aa086e0cc9b --- /dev/null +++ b/en/application-dev/database/app-data-persistence-overview.md @@ -0,0 +1,17 @@ +# Application Data Persistence Overview + + +Application data persistence means to save the application data in the memory to a file or database on a device. 
The data in the memory is usually saved in the forms of data structs or data objects, and the data in storage media can be saved in the forms of text, databases, or binary files. + + +The OpenHarmony standard system supports typical data storage forms, including user preferences (**Preferences**), key-value databases (**KV-Store**), and relational databases (**RelationalStore**). + + +You can use proper data storage forms to implement data persistence: + + +- **Preferences**: used to store application configuration data. Data is stored as text files on a device. When the application is used, it loads all the data from the text file to the memory. **Preferences** allow fast and efficient data access, but are not suitable when a large amount of data needs to be stored. + +- **KV-Store**: used to store data in KV pairs, in which the key uniquely identifies the data. A KV store is a kind of non-relational database. It is ideal for storing service data with few data and service relationships. It has been widely used because it poses fewer database version compatibility issues in distributed scenarios and simplifies conflict handling in data synchronization. KV databases feature higher cross-device and cross-version compatibility than relational databases. + +- **RelationalStore**: used to store data in rows and columns. It is widely used to process relational data in applications. RelationalStore provides a set of APIs for adding, deleting, modifying, and querying data. You can also define and use SQL statements for complex service scenarios. 
diff --git a/en/application-dev/database/data-backup-and-restore.md b/en/application-dev/database/data-backup-and-restore.md new file mode 100644 index 0000000000000000000000000000000000000000..0f95a70dbbf782a438368e5afd8d84581127fcc7 --- /dev/null +++ b/en/application-dev/database/data-backup-and-restore.md @@ -0,0 +1,231 @@ +# Database Backup and Restoration + + +## When to Use + +You may need to restore a database in any of the following cases: + +An important operation being performed by an application is interrupted. + +The database is unavailable due to data loss or corruption, or dirty data. + + +Both KV stores and RDB stores support database backup and restoration. In addition, KV stores allow you to delete database backups to release local storage space. + + +## Backing Up, Restoring, and Deleting a KV Store + +You can use **backup()** to back up a KV store, use **restore()** to restore a KV store, and use **deletebackup()** to delete a KV store backup file. For details about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). + +1. Create a KV store. + + (1) Create a **kvManager** instance. + + (2) Set database parameters. + + (3) Create a **kvStore** instance. + + + ```js + import distributedKVStore from '@ohos.data.distributedKVStore'; + + let kvManager; + let context = getContext(this); + const kvManagerConfig = { + context: context, + bundleName: 'com.example.datamanagertest' + } + try { + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); + } catch (e) { + console.error(`Failed to create KVManager. 
Code:${e.code},message:${e.message}`); + } + let kvStore; + try { + const options = { + createIfMissing: true, + encrypt: false, + backup: false, + autoSync: true, + kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, + securityLevel: distributedKVStore.SecurityLevel.S2 + }; + kvManager.getKVStore('storeId', options, (err, store) => { + if (err) { + console.error(`Fail to get KVStore. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting KVStore.'); + kvStore = store; + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +2. Use **put()** to insert data to the KV store. + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Fail to put data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +3. Use **backup()** to back up the KV store. + + ```js + let file = 'BK001'; + try { + kvStore.backup(file, (err) => { + if (err) { + console.error(`Fail to backup data.code:${err.code},message:${err.message}`); + } else { + console.info('Succeeded in backupping data.'); + } + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +4. Use **delete()** to delete data to simulate unexpected deletion or data tampering. + + ```js + try { + kvStore.delete(KEY_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Fail to delete data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in deleting data.'); + }); + } catch (e) { + console.error(`An unexpected error occurred. 
Code:${e.code},message:${e.message}`); + } + ``` + +5. Use **restore()** to restore the KV store. + + ```js + let file = 'BK001'; + try { + kvStore.restore(file, (err) => { + if (err) { + console.error(`Fail to restore data. Code:${err.code},message:${err.message}`); + } else { + console.info('Succeeded in restoring data.'); + } + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +6. Use **deleteBackup()** to delete the backup file to release storage space. + + ```js + let kvStore; + let files = ['BK001']; + try { + kvStore.deleteBackup(files).then((data) => { + console.info(`Succeed in deleting Backup. Data:filename is ${data[0]},result is ${data[1]}.`); + }).catch((err) => { + console.error(`Fail to delete Backup. Code:${err.code},message:${err.message}`); + }) + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + + +## Backing Up and Restoring an RDB Store + +You can use **backup()** to back up an RDB store, and use **restore()** to restore an RDB store. For details about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). + +1. Use **getRdbStore()** to create an RDB store. + + ```js + import relationalStore from '@ohos.data.relationalStore'; + + let store; + let context = getContext(this); + const STORE_CONFIG = { + name: 'RdbTest.db', + securityLevel: relationalStore.SecurityLevel.S1 + }; + relationalStore.getRdbStore(context, STORE_CONFIG, (err, rdbStore) => { + store = rdbStore; + if (err) { + console.error(`Failed to get RdbStore. Code:${err.code},message:${err.message}`); + return; + } + store.executeSql("CREATE TABLE IF NOT EXISTS EMPLOYEE (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, age INTEGER, salary INTEGER, codes Uint8Array);", null); + console.info('Succeeded in getting RdbStore.'); + }) + ``` + +2. Use **insert()** to insert data to the RDB store. 
+ + ```js + const valueBucket = { + 'NAME': 'Lisa', + 'AGE': 18, + 'SALARY': 100.5, + 'CODES': new Uint8Array([1, 2, 3, 4, 5]) + }; + store.insert('EMPLOYEE', valueBucket, relationalStore.ConflictResolution.ON_CONFLICT_REPLACE, (err, rowId) => { + if (err) { + console.error(`Failed to insert data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in inserting data. rowId:${rowId}`); + }) + ``` + +3. Use **backup()** to back up the RDB store. + + ```js + store.backup('dbBackup.db', (err) => { + if (err) { + console.error(`Failed to backup data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in backuping data.`); + }) + ``` + +4. Use **delete()** to delete data to simulate unexpected deletion or data tampering. + + ```js + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); + predicates.equalTo('NAME', 'Lisa'); + let promise = store.delete(predicates); + promise.then((rows) => { + console.info(`Delete rows: ${rows}`); + }).catch((err) => { + console.error(`Failed to delete data. Code:${err.code},message:${err.message}`); + }) + ``` + +5. Use **restore()** to restore the RDB store. + + ```js + store.restore('dbBackup.db', (err) => { + if (err) { + console.error(`Failed to restore data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in restoring data.`); + }) + ``` diff --git a/en/application-dev/database/data-encryption.md b/en/application-dev/database/data-encryption.md new file mode 100644 index 0000000000000000000000000000000000000000..d9672ce7fdc54b93d736f4684ea1b454e512c207 --- /dev/null +++ b/en/application-dev/database/data-encryption.md @@ -0,0 +1,85 @@ +# Database Encryption + + +## When to Use + +OpenHarmony provides the database encryption capability to effectively protect the data stored in a database. Database encryption allows data to be stored and used in ciphertext, ensuring data confidentiality and integrity. 
+ +The encrypted database can be accessed only using an API, and the database file cannot be opened in other ways. Whether a database is encrypted is set when the database is created, and the setting cannot be changed. + +Both KV stores and RDB stores support database encryption. + + +## Encrypting a KV Store + +When a KV store is created, the **encrypt** parameter in **options** specifies whether to encrypt the KV store. The value **true** means to encrypt the KV store, and the value **false** (default) means the opposite. + +For details about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). + + +```js +import distributedKVStore from '@ohos.data.distributedKVStore'; + +let kvManager; +let context = getContext(this); +const kvManagerConfig = { + context: context, + bundleName: 'com.example.datamanagertest', +} +try { + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); +} catch (e) { + console.error(`Failed to create KVManager. Code:${e.code},message:${e.message}`); +} +let kvStore; +try { + const options = { + createIfMissing: true, + // Whether to encrypt the KV store. + encrypt: true, + backup: false, + autoSync: true, + kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, + securityLevel: distributedKVStore.SecurityLevel.S2 + }; + kvManager.getKVStore('storeId', options, (err, store) => { + if (err) { + console.error(`Fail to get KVStore. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting KVStore.'); + kvStore = store; + }); +} catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); +} +``` + + +## Encrypting an RDB Store + +When an RDB store is created, the **encrypt** parameter in **options** specifies whether to encrypt the RDB store. The value **true** means to encrypt the RDB store, and the value **false** (default) means the opposite. 
+ +For details about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). + + +```js +import relationalStore from '@ohos.data.relationalStore'; + +let store; +let context = getContext(this); +const STORE_CONFIG = { + name: 'RdbTest.db', + securityLevel: relationalStore.SecurityLevel.S1, + encrypt: true +}; +relationalStore.getRdbStore(context, STORE_CONFIG, (err, rdbStore) => { + store = rdbStore; + if (err) { + console.error(`Failed to get RdbStore. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in getting RdbStore.`); +}) +``` diff --git a/en/application-dev/database/data-mgmt-overview.md b/en/application-dev/database/data-mgmt-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..aa98d97da5acdce3a382a70d383e140463a5399a --- /dev/null +++ b/en/application-dev/database/data-mgmt-overview.md @@ -0,0 +1,36 @@ +# Data Management Overview + + +## Function + +Data management provides data storage, management, and synchronization capabilities. For example, you can store the Contacts application data in database for secure management and shared access, and synchronize the contacts information with a smart watch. + +- Data storage: provides data persistence capabilities, which can be classified into user preferences, key-value (KV) stores, and relational database (RDB) stores by data characteristics. + +- Data management: provides efficient data management capabilities, including permission management, data backup and restoration, and dataShare framework. + +- Data synchronization: provides data synchronization across devices. For example, distributed data objects support sharing of memory objects across devices, and distributed databases support database access across devices. + +The database stores created by an application are saved to the application sandbox. When the application is uninstalled, the database stores are automatically deleted. 
+ + +## Working Principles + +The data management module includes user preferences (**Preferences**), KV data management (**KV-Store**), RDB data management (**RelationalStore**), distributed data object (**DataObject**), and cross-application data management (**DataShare**). The interface layer provides standard JavaScript APIs for application development. The Frameworks&System service layer implements storage and synchronization of component data, and provides dependencies for SQLite and other subsystems. + + **Figure 1** Data management architecture + +![dataManagement](figures/dataManagement.jpg) + + +- **Preferences**: implements persistence of lightweight configuration data and supports subscription of data change notifications. Preferences are used to store application configuration information and user preference settings and do not support distributed synchronization. + +- **KV-Store**: implements read, write, encryption, and manual backup of data in KV stores and notification subscription. When an application needs to use the distributed capabilities of KV stores, KV-Store sends a synchronization request to DatamgrService to implement data synchronization across devices. + +- **RelationalStore**: implements addition, deletion, modification, query, encryption, manually backup of data in RDB stores, and notification subscription. When an application needs to use the distributed capabilities of an RDB store, RelationalStore sends a synchronization request to DatamgrService to implement data synchronization across devices. + +- **DataObject**: independently provides distributed capabilities for the data of object structs. For the object data that is still required after the restart of an application (either the cross-device application or local device application), the **DatamgrService** implements temporary storage of the object data. 
+ +- **DataShare**: provides the data provider-consumer mode to implement addition, deletion, modification, and query of cross-application data on a device, and notification subscription. **DataShare** is not bound to any database and can interact with RDB and KV stores. You can also encapsulate your own databases for C/C++ applications.
In addition to the provider-consumer mode, **DataShare** provides silent access, which allows direct access to the provider's data via the DatamgrService proxy instead of starting the provider. Currently, only the RDB stores support silent access. + +- **DatamgrService**: implements synchronization and cross-application sharing for other components, including cross-device synchronization of **RelationalStore** and **KV-Store**, silent access to provider data of **DataShare**, and temporary storage of **DataObject** synchronization object data. diff --git a/en/application-dev/database/data-persistence-by-kv-store.md b/en/application-dev/database/data-persistence-by-kv-store.md new file mode 100644 index 0000000000000000000000000000000000000000..804fb6b12764b95cec9566bdc165234284d32a8d --- /dev/null +++ b/en/application-dev/database/data-persistence-by-kv-store.md @@ -0,0 +1,199 @@ +# Persisting KV Store Data + + +## When to Use + +The key-value (KV) database stores data in the form of KV pairs. You can use KV stores to store data organized in a simple model, for example, product names and prices or employee IDs and daily attendance. The simple data structure allows higher compatibility with different database versions and device types. + + +## Constraints + +- For each record in a device KV store, the key cannot exceed 896 bytes and the value cannot exceed 4 MB. + +- For each record in a single KV store, the key cannot exceed 1 KB and the value cannot exceed 4 MB. + +- A maximum of 16 distributed KV stores can be opened simultaneously for an application. + +- Blocking operations, for example, modifying UI components, are not allowed in the KV store event callbacks. + + +## Available APIs + +The following table lists the APIs used for KV data persistence. Most of the APIs are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. 
For more information about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). + +| API| Description| +| -------- | -------- | +| createKVManager(config: KVManagerConfig): KVManager | Creates a **KvManager** instance to manage database objects.| +| getKVStore<T>(storeId: string, options: Options, callback: AsyncCallback<T>): void | Creates and obtains a KV store of the specified type.| +| put(key: string, value: Uint8Array\|string\|number\|boolean, callback: AsyncCallback<void>): void | Adds a KV pair of the specified type to this KV store.| +| get(key: string, callback: AsyncCallback<Uint8Array\|string\|boolean\|number>): void | Obtains the value of the specified key.| +| delete(key: string, callback: AsyncCallback<void>): void | Deletes a KV pair based on the specified key.| + + +## How to Develop + +1. Create a **KvManager** instance to manage database objects. + + Example: + + Stage model: + + + ```js + // Import the module. + import distributedKVStore from '@ohos.data.distributedKVStore'; + + // Stage model + import UIAbility from '@ohos.app.ability.UIAbility'; + + let kvManager; + + export default class EntryAbility extends UIAbility { + onCreate() { + let context = this.context; + const kvManagerConfig = { + context: context, + bundleName: 'com.example.datamanagertest' + }; + try { + // Create a KVManager instance. + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); + // Create and obtain the database. + } catch (e) { + console.error(`Failed to create KVManager. Code:${e.code},message:${e.message}`); + } + } + } + ``` + + FA model: + + + ```js + // Import the module. + import distributedKVStore from '@ohos.data.distributedKVStore'; + + // FA model + import featureAbility from '@ohos.ability.featureAbility'; + + let kvManager; + let context = featureAbility.getContext(); // Obtain the context. 
+ const kvManagerConfig = { + context: context, + bundleName: 'com.example.datamanagertest' + }; + try { + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); + // Create and obtain the database. + } catch (e) { + console.error(`Failed to create KVManager. Code:${e.code},message:${e.message}`); + } + ``` + +2. Create and obtain a KV store. + + Example: + + ```js + try { + const options = { + createIfMissing: true, // Whether to create a KV store when the database file does not exist. By default, a KV store is created. + encrypt: false, // Whether to encrypt database files. By default, database files are not encrypted. + backup: false, // Whether to back up database files. By default, the database files are backed up. + autoSync: true, // Whether to automatically synchronize database files. The value **true** means to automatically synchronize database files; the value **false** (default) means the opposite. + kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, // Type of the KV store to create. By default, a device KV store is created. + securityLevel: distributedKVStore.SecurityLevel.S2 // Security level of the KV store. + }; + // storeId uniquely identifies a KV store. + kvManager.getKVStore('storeId', options, (err, kvStore) => { + if (err) { + console.error(`Failed to get KVStore. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting KVStore.'); + // Perform related data operations. + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +3. Use **put()** to add data to the KV store. + + Example: + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to put data.
Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + + > **NOTE** + > + > The **put()** method adds a KV pair if the specified key does not exist and changes the value if the specified key already exists. + +4. Use **get()** to obtain the value of a key. + + Example: + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to put data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + kvStore.get(KEY_TEST_STRING_ELEMENT, (err, data) => { + if (err !== undefined) { + console.error(`Failed to get data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in getting data. data:${data}`); + }); + }); + } catch (e) { + console.error(`Failed to get data. Code:${e.code},message:${e.message}`); + } + ``` + +5. Use **delete()** to delete the data of the specified key. + + Example: + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to put data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + kvStore.delete(KEY_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to delete data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in deleting data.'); + }); + }); + } catch (e) { + console.error(`An unexpected error occurred.
Code:${e.code},message:${e.message}`); + } + ``` diff --git a/en/application-dev/database/data-persistence-by-preferences.md b/en/application-dev/database/data-persistence-by-preferences.md new file mode 100644 index 0000000000000000000000000000000000000000..a8258270a7e2bcfc2305c156ce8e3314d03bb311 --- /dev/null +++ b/en/application-dev/database/data-persistence-by-preferences.md @@ -0,0 +1,250 @@ +# Persisting Preferences Data + + +## When to Use + +The **Preferences** module provides APIs for processing data in the form of key-value (KV) pairs, and supports persistence of the KV pairs when required, as well as modification and query of the data. You can use **Preferences** when you want a unique storage for global data. **Preferences** caches data in the memory, which allows fast access when the data is required. **Preferences** is recommended for storing small amount of data, such as personalized settings (font size and whether to enable the night mode) of applications. + + +## Working Principles + +User applications call **Preference** through the JS interface to read and write data files. You can load the data of a **Preferences** persistence file to a **Preferences** instance. Each file uniquely corresponds to an instance. The system stores the instance in memory through a static container until the instance is removed from the memory or the file is deleted. The following figure illustrates how **Preference** works. + +The preference persistent file of an application is stored in the application sandbox. You can use **context** to obtain the file path. For details, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). + +**Figure 1** Preferences working mechanism + +![preferences](figures/preferences.jpg) + + +## Constraints + +- The key in a KV pair must be a string and cannot be empty or exceed 80 bytes. 
+ +- If the value is of the string type, it can be empty or a string not longer than 8192 bytes. + +- The memory usage increases with the amount of **Preferences** data. The maximum number of data records recommended is 10,000. Otherwise, high memory overheads will be caused. + + +## Available APIs + +The following table lists the APIs used for preferences data persistence. Most of the APIs are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. For more information about the APIs, see [User Preferences](../reference/apis/js-apis-data-preferences.md). + +| API| Description| +| -------- | -------- | +| getPreferences(context: Context, name: string, callback: AsyncCallback<Preferences>): void | Obtains a **Preferences** instance.| +| put(key: string, value: ValueType, callback: AsyncCallback<void>): void | Writes data to the **Preferences** instance. You can use **flush()** to persist the **Preferences** instance data.| +| has(key: string, callback: AsyncCallback<boolean>): void | Checks whether the **Preferences** instance contains a KV pair with the given key. The key cannot be empty.| +| get(key: string, defValue: ValueType, callback: AsyncCallback<ValueType>): void | Obtains the value of the specified key. If the value is null or not of the default value type, **defValue** is returned.| +| delete(key: string, callback: AsyncCallback<void>): void | Deletes the KV pair with the given key from the **Preferences** instance.| +| flush(callback: AsyncCallback<void>): void | Flushes the data of this **Preferences** instance to a file for data persistence.| +| on(type: 'change', callback: Callback<{ key : string }>): void | Subscribes to data changes of the specified key.
When the value of the specified key is changed and saved by **flush()**, a callback will be invoked to return the new data.| +| off(type: 'change', callback?: Callback<{ key : string }>): void | Unsubscribes from data changes.| +| deletePreferences(context: Context, name: string, callback: AsyncCallback<void>): void | Deletes a **Preferences** instance from memory. If the **Preferences** instance has a persistent file, this API also deletes the persistent file.| + + +## How to Develop + +1. Import the **@ohos.data.preferences** module. + + ```js + import dataPreferences from '@ohos.data.preferences'; + ``` + +2. Obtain a **Preferences** instance. Read data from a file and load the data to a **Preferences** instance for data operations. + + Stage model: + + + ```js + import UIAbility from '@ohos.app.ability.UIAbility'; + + class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + try { + dataPreferences.getPreferences(this.context, 'mystore', (err, preferences) => { + if (err) { + console.error(`Failed to get preferences. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting preferences.'); + // Perform related data operations. + }) + } catch (err) { + console.error(`Failed to get preferences. Code:${err.code},message:${err.message}`); + } + } + } + ``` + + FA model: + + + ```js + import featureAbility from '@ohos.ability.featureAbility'; + + // Obtain the context. + let context = featureAbility.getContext(); + + try { + dataPreferences.getPreferences(context, 'mystore', (err, preferences) => { + if (err) { + console.error(`Failed to get preferences. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting preferences.'); + // Perform related data operations. + }) + } catch (err) { + console.error(`Failed to get preferences. Code is ${err.code},message:${err.message}`); + } + ``` + +3. Write data. + + Use **put()** to write data to the **Preferences** instance. 
After data is written, you can use **flush()** to persist the **Preferences** instance data to a file if necessary. + + > **NOTE** + > + > If the specified key already exists, the **put()** method changes the value. To prevent a value from being changed by mistake, you can use **has()** to check whether the KV pair exists. + + Example: + + + ```js + try { + preferences.has('startup', function (err, val) { + if (err) { + console.error(`Failed to check the key 'startup'. Code:${err.code}, message:${err.message}`); + return; + } + if (val) { + console.info("The key 'startup' is contained."); + } else { + console.info("The key 'startup' does not contain."); + // Add a KV pair. + try { + preferences.put('startup', 'auto', (err) => { + if (err) { + console.error(`Failed to put data. Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + }) + } catch (err) { + console.error(`Failed to put data. Code: ${err.code},message:${err.message}`); + } + } + }) + } catch (err) { + console.error(`Failed to check the key 'startup'. Code:${err.code}, message:${err.message}`); + } + ``` + +4. Read data. + + Use **get()** to obtain the value of the specified key. If the value is null or is not of the default value type, the default data is returned. Example: + + ```js + try { + preferences.get('startup', 'default', (err, val) => { + if (err) { + console.error(`Failed to get value of 'startup'. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Succeeded in getting value of 'startup'. val: ${val}.`); + }) + } catch (err) { + console.error(`Failed to get value of 'startup'. Code:${err.code}, message:${err.message}`); + } + ``` + +5. Delete data. + + Use delete() to delete a KV pair.
Example: + + + ```js + try { + preferences.delete('startup', (err) => { + if (err) { + console.error(`Failed to delete the key 'startup'. Code:${err.code}, message:${err.message}`); + return; + } + console.info("Succeeded in deleting the key 'startup'."); + }) + } catch (err) { + console.error(`Failed to delete the key 'startup'. Code:${err.code}, message:${err.message}`); + } + ``` + +6. Persist data. + + You can use **flush()** to persist the data held in a **Preferences** instance to a file. Example: + + ```js + try { + preferences.flush((err) => { + if (err) { + console.error(`Failed to flush. Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in flushing.'); + }) + } catch (err) { + console.error(`Failed to flush. Code:${err.code}, message:${err.message}`); + } + ``` + +7. Subscribe to data changes. + + Specify an observer as the callback to return the data changes for an application. When the value of the subscribed key is changed and saved by **flush()**, the observer callback will be invoked to return the new data. Example: + + ```js + let observer = function (key) { + console.info('The key' + key + 'changed.'); + } + preferences.on('change', observer); + // The data is changed from 'auto' to 'manual'. + preferences.put('startup', 'manual', (err) => { + if (err) { + console.error(`Failed to put the value of 'startup'. Code:${err.code},message:${err.message}`); + return; + } + console.info("Succeeded in putting the value of 'startup'."); + preferences.flush((err) => { + if (err) { + console.error(`Failed to flush. Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in flushing.'); + }) + }) + ``` + +8. Delete a **Preferences** instance from the memory. + + Use **deletePreferences()** to delete a **Preferences** instance from the memory. If the **Preferences** instance has a persistent file, the persistent file and its backup and corrupted files will also be deleted. 
+ + > **NOTE** + > + > - The deleted **Preferences** instance cannot be used for data operations. Otherwise, data inconsistency will be caused. + > + > - The deleted data and files cannot be restored. + + Example: + + + ```js + try { + dataPreferences.deletePreferences(this.context, 'mystore', (err, val) => { + if (err) { + console.error(`Failed to delete preferences. Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in deleting preferences.'); + }) + } catch (err) { + console.error(`Failed to delete preferences. Code:${err.code}, message:${err.message}`); + } + ``` \ No newline at end of file diff --git a/en/application-dev/database/data-persistence-by-rdb-store.md b/en/application-dev/database/data-persistence-by-rdb-store.md new file mode 100644 index 0000000000000000000000000000000000000000..dd374f26a10e64b62d84f111513f3d3c528017df --- /dev/null +++ b/en/application-dev/database/data-persistence-by-rdb-store.md @@ -0,0 +1,243 @@ +# Persisting RDB Store Data + + +## When to Use + +A relational database (RDB) store is used to store data in complex relational models, such as the student information including names, student IDs, and scores of each subject, or employee information including names, employee IDs, and positions, based on SQLite. The data is more complex than key-value (KV) pairs due to strict mappings. You can use **RelationalStore** to implement persistence of this type of data. + + +## Basic Concepts + +- **Predicates**: A representation of the property or feature of a data entity, or the relationship between data entities. It is used to define operation conditions. + +- **ResultSet**: a set of query results, which allows access to the required data in flexible modes. + + +## Working Principles + +**RelationalStore** provides APIs for applications to perform data operations. 
With SQLite as the underlying persistent storage engine, **RelationalStore** provides SQLite database features, including transactions, indexes, views, triggers, foreign keys, parameterized queries, prepared SQL statements, and more. + +**Figure 1** Working mechanism + +![relationStore_local](figures/relationStore_local.jpg) + + +## Constraints + +- The default logging mode is Write Ahead Log (WAL), and the default flushing mode is **FULL** mode. + +- An RDB store can be connected to a maximum of four connection pools for user read operations. + +- To ensure data accuracy, only one write operation is allowed at a time. + +- Once an application is uninstalled, related database files and temporary files on the device are automatically deleted. + + +## Available APIs + +The following table lists the APIs used for RDB data persistence. Most of the APIs are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. For more information about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). + +| API| Description| +| -------- | -------- | +| getRdbStore(context: Context, config: StoreConfig, callback: AsyncCallback<RdbStore>): void | Obtains a **RdbStore** instance to implement RDB store operations. 
You can set **RdbStore** parameters based on actual requirements and use **RdbStore** APIs to perform data operations.| +| executeSql(sql: string, bindArgs: Array<ValueType>, callback: AsyncCallback<void>):void | Executes an SQL statement that contains specified arguments but returns no value.| +| insert(table: string, values: ValuesBucket, callback: AsyncCallback<number>):void | Inserts a row of data into a table.| +| update(values: ValuesBucket, predicates: RdbPredicates, callback: AsyncCallback<number>):void | Updates data in the RDB store based on the specified **RdbPredicates** instance.| +| delete(predicates: RdbPredicates, callback: AsyncCallback<number>):void | Deletes data from the RDB store based on the specified **RdbPredicates** instance.| +| query(predicates: RdbPredicates, columns: Array<string>, callback: AsyncCallback<ResultSet>):void | Queries data in the RDB store based on specified conditions.| +| deleteRdbStore(context: Context, name: string, callback: AsyncCallback<void>): void | Deletes an RDB store.| + + +## How to Develop + +1. Obtain an **RdbStore** instance.
Example: + + Stage model: + + ```js + import relationalStore from '@ohos.data.relationalStore'; // Import the module. + import UIAbility from '@ohos.app.ability.UIAbility'; + + class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + const STORE_CONFIG = { + name: 'RdbTest.db', // Database file name. + securityLevel: relationalStore.SecurityLevel.S1 // Database security level. + }; + + const SQL_CREATE_TABLE ='CREATE TABLE IF NOT EXISTS EMPLOYEE (ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME TEXT NOT NULL, AGE INTEGER, SALARY REAL, CODES BLOB)'; // SQL statement for creating a data table. + + relationalStore.getRdbStore(this.context, STORE_CONFIG, (err, store) => { + if (err) { + console.error(`Failed to get RdbStore. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Succeeded in getting RdbStore.`); + store.executeSql(SQL_CREATE_TABLE); // Create a data table. + + // Perform operations such as adding, deleting, modifying, and querying data in the RDB store. + + }); + } + } + ``` + + FA model: + + + ```js + import relationalStore from '@ohos.data.relationalStore'; // Import the module. + import featureAbility from '@ohos.ability.featureAbility'; + + // Obtain the context. + let context = featureAbility.getContext(); + + const STORE_CONFIG = { + name: 'RdbTest.db', // Database file name. + securityLevel: relationalStore.SecurityLevel.S1 // Database security level. + }; + + const SQL_CREATE_TABLE ='CREATE TABLE IF NOT EXISTS EMPLOYEE (ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME TEXT NOT NULL, AGE INTEGER, SALARY REAL, CODES BLOB)'; // SQL statement for creating a data table. + + relationalStore.getRdbStore(context, STORE_CONFIG, (err, store) => { + if (err) { + console.error(`Failed to get RdbStore. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Succeeded in getting RdbStore.`); + store.executeSql(SQL_CREATE_TABLE); // Create a data table. 
+ + // Perform operations such as adding, deleting, modifying, and querying data in the RDB store. + + }); + ``` + + > **NOTE** + > + > - The RDB store created by an application varies with the context. Multiple RDB stores are created for the same database name with different application contexts. For example, each UIAbility has its own context. + > + > - When an application calls **getRdbStore()** to obtain an RDB store instance for the first time, the corresponding database file is generated in the application sandbox. If you want to move the files of an RDB store to another place for view, you must also move the temporary files with finename extensions **-wal** or **-shm** in the same directory. Once an application is uninstalled, the database files and temporary files generated by the application on the device are also removed. + +2. Use **insert()** to insert data to the RDB store. Example: + + ```js + const valueBucket = { + 'NAME': 'Lisa', + 'AGE': 18, + 'SALARY': 100.5, + 'CODES': new Uint8Array([1, 2, 3, 4, 5]) + }; + store.insert('EMPLOYEE', valueBucket, (err, rowId) => { + if (err) { + console.error(`Failed to insert data. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Succeeded in inserting data. rowId:${rowId}`); + }) + ``` + + > **NOTE** + > + > **RelationalStore** does not provide explicit flush operations for data persistence. Data inserted by **insert()** is stored in files persistently. + +3. Modify or delete data based on the specified **Predicates** instance. + + Use **update()** to modify data and **delete()** to delete data. + + Example: + + ```js + // Modify data. + const valueBucket = { + 'NAME': 'Rose', + 'AGE': 22, + 'SALARY': 200.5, + 'CODES': new Uint8Array([1, 2, 3, 4, 5]) + }; + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); // Create predicates for the table named EMPLOYEE. + predicates.equalTo('NAME', 'Lisa'); // Modify the data of Lisa in the EMPLOYEE table to the specified data. 
+ store.update(valueBucket, predicates, (err, rows) => { + if (err) { + console.error(`Failed to update data. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Succeeded in updating data. row count: ${rows}`); + }) + + // Delete data. + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); + predicates.equalTo('NAME', 'Lisa'); + store.delete(predicates, (err, rows) => { + if (err) { + console.error(`Failed to delete data. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`Delete rows: ${rows}`); + }) + ``` + +4. Query data based on the conditions specified by **Predicates**. + + Use **query()** to query data. The data obtained is returned in a **ResultSet** object. + + Example: + + ```js + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); + predicates.equalTo('NAME', 'Rose'); + store.query(predicates, ['ID', 'NAME', 'AGE', 'SALARY', 'CODES'], (err, resultSet) => { + if (err) { + console.error(`Failed to query data. Code:${err.code}, message:${err.message}`); + return; + } + console.info(`ResultSet column names: ${resultSet.columnNames}`); + console.info(`ResultSet column count: ${resultSet.columnCount}`); + }) + ``` + + > **NOTE** + > + > Use **close()** to close the **ResultSet** that is no longer used in a timely manner so that the memory allocated can be released. + +5. Delete the RDB store. + + Use **deleteRdbStore()** to delete the RDB store and related database files. + + Example: + + Stage model: + + + ```js + import UIAbility from '@ohos.app.ability.UIAbility'; + + class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + relationalStore.deleteRdbStore(this.context, 'RdbTest.db', (err) => { + if (err) { + console.error(`Failed to delete RdbStore. 
Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in deleting RdbStore.'); + }); + } +} + ``` + + FA model: + + + ```js + import featureAbility from '@ohos.ability.featureAbility'; + + // Obtain the context. + let context = featureAbility.getContext(); + + relationalStore.deleteRdbStore(context, 'RdbTest.db', (err) => { + if (err) { + console.error(`Failed to delete RdbStore. Code:${err.code}, message:${err.message}`); + return; + } + console.info('Succeeded in deleting RdbStore.'); + }); + ``` diff --git a/en/application-dev/database/data-reliability-security-overview.md b/en/application-dev/database/data-reliability-security-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..049007876bff8cbf8b0bf6b3cad9cd28a80c92ff --- /dev/null +++ b/en/application-dev/database/data-reliability-security-overview.md @@ -0,0 +1,64 @@ +# Data Reliability and Security Overview + +## Introduction + +During system running, a database fault may occur due to storage damage, insufficient storage space, file system permission, or system power-off. The database fault may cause data loss. For example, the database corruption of Contacts causes the loss of Contacts data. The data management subsystem provides the following solutions and capabilities to ensure data reliability and security: + +- Data backup and restoration: Critical data (such as the bank information) can be backed up and restored from the backup to prevent data loss. + +- Database encryption: The database that stores sensitive information, such as authentication credentials and financial data, can be encrypted to improve data security. + +- Access control by device and data level: The access to data across devices is controlled based on the device security level and data security labels. + +In addition, the backup database is stored in the application sandbox. When the storage space is insufficient, you can delete the local database backup to release space. 
+ + +## Basic Concepts + +Before developing functions related to data reliability and security, understand the following concepts. + + +### Database Backup and Restoration + +- Database backup: OpenHarmony provides full backup of database files. + When backing up a database, you only need to invoke the backup API of the database, without closing the database. + +- Database restoration: You can restore a database from a database backup file. + + +### Database Encryption + +The entire database file can be encrypted to enhance the database security. + + +### Data Rating + +In distributed scenarios, the access to data is controlled based on the device security level and data security labels. + +A higher data security label and device security level indicate stricter encryption and access control measures and higher data security. + + +## Working Principles + + +### Database Backup and Restoration Mechanism + +The data of a database is backed up to the specified file. Subsequent operations on the database do not affect the backup file. The database is overwritten by the specified backup file only when a restoration is performed. + +- KV store backup directory: **/data/service/el1(el2)/public/database/...{appId}/kvdb/backup/...{storeId}** + +- RDB store backup directory: **/data/app/el1(el2)/100/database/...{bundlename}/rdb** + + +### Database Encryption Mechanism + +When encrypting a database, you do not need to pass in the key for encryption. The only thing you need to do is set the database encryption status. The system automatically calls the [HUKS APIs](../reference/apis/js-apis-huks.md) to generate a key and encrypt the database. + + +## Constraints + +- The database encryption key is automatically changed once a year. + +- A maximum of five backup files can be retained for a KV store. + +- Automatic backup of a KV store must be performed when the device is charging and the screen is off. 
diff --git a/en/application-dev/database/data-sync-of-distributed-data-object.md b/en/application-dev/database/data-sync-of-distributed-data-object.md new file mode 100644 index 0000000000000000000000000000000000000000..d620fae702fd1559e07114e2e650a03248179bcd --- /dev/null +++ b/en/application-dev/database/data-sync-of-distributed-data-object.md @@ -0,0 +1,307 @@ +# Cross-Device Synchronization of Distributed Data Objects + + +## When to Use + +To implement traditional data synchronization between devices, you need to design the message processing logic, including setting up a communication link, sending, receiving, and processing messages, retry mechanism upon errors, and resolving data conflicts. The workload is heavy. In addition, the debugging complexity increases with the number of devices. + +The device status, message sending progress, and data transmitted are variables. If these variables support global access, they can be accessed as local variables on difference devices. This simplifies data synchronization between multiple devices. + +The distributed data object (**DataObject**) implements global access to variables. **DataObject** provides basic data object management capabilities and distributed capabilities. You can use the APIs to create, query, delete, and modify in-memory objects and subscribe to event notifications. OpenHarmony also provides easy-to-use JS APIs for distributed application scenarios to easily implement cross-device data collaboration for the same application. In addition, object status and data changes on different devices can be observed. This feature implements data object collaboration for the same application between multiple devices that form a Super Device. **DataObject** greatly reduces the development workloads compared with the traditional mode. + + +## Basic Concepts + +- Distributed in-memory database
+ The distributed in-memory database caches data in the memory so that applications can quickly access data without persisting data. If the database is closed, the data is not retained. + +- Distributed data object + A distributed data object is an encapsulation of the JS object type. Each distributed data object instance creates a data table in the in-memory database. The in-memory databases created for different applications are isolated from each other. Reading data from and writing data to a distributed data object are mapped to the **get()** and **put()** operations in the corresponding database, respectively. + + The distributed data object can be in the following states in its lifecycle: + + - **Uninitialized**: The distributed data object is not instantiated or has been destroyed. + - **Local**: The data table is created, but the data cannot be synchronized. + - **Distributed**: The data table is created, and there are at least two online devices with the same session ID. In this case, data can be synchronized across devices. If a device is offline or the session ID is empty, the distributed data object changes to the local state. + + +## Working Principles + +**Figure 1** Working mechanism + +![distributedObject](figures/distributedObject.jpg) + +The distributed data objects are encapsulated into JS objects in distributed in-memory databases. This allows the distributed data objects to be operated in the same way as local variables. The system automatically implements cross-device data synchronization. + + +### JS Object Storage and Encapsulation Mechanism + +- An in-memory database is created for each distributed data object instance and identified by a session ID (**SessionId**). The in-memory databases created for different applications are isolated from each other. + +- When a distributed data object is instantiated, all properties of the object are traversed recursively. 
**Object.defineProperty** is used to define the **set()** and **get()** methods of all properties. The **set()** and **get()** methods correspond to the **put** and **get** operations of a record in the database, respectively. **Key** specifies the property name, and **Value** specifies the property value. + +- When a distributed data object is read or written, the **set()** and **get()** methods are automatically called to perform the related operations to the database. + +**Table 1** Correspondence between a distributed data object and a distributed database + +| Distributed Data Object Instance| Object Instance| Property Name| Property Value| +| -------- | -------- | -------- | -------- | +| Distributed in-memory database| Database identified by **sessionID**| Key of a record in the database| Value of a record in the database| + + +### Cross-Device Synchronization and Data Change Notification Mechanism + +The distributed data object is used to implement data synchronization between objects. You can create a distributed data object and set **sessionID** for the devices on a trusted network. The distributed data objects with the same **sessionID** on different devices can synchronize data with each other. + +As shown in the following figure, distributed data object 1 on device A and device B have the same session ID **session1**. The synchronization relationship of session1 is established between the two objects. + + **Figure 2** Object synchronization relationship + +![distributedObject_sync](figures/distributedObject_sync.jpg) + +For each device, only one object can be added to a synchronization relationship. As shown in the preceding figure, distributed data object 2 of device A cannot be added to session 1 because distributed data object 1 of device A has been added to session 1. + +After the synchronization relationship is established, each session has a copy of shared object data. 
The distributed data objects added to the same session support the following operations: + + (1) Reading or modifying the data in the session. + + (2) Listening for data changes made by other devices. + + (3) Listening for status changes, such as the addition and removal of other devices. + + +### Minimum Unit to Synchronize + +Attribute is the minimum unit to synchronize in distributed data objects. For example, object 1 in the following figure has three attributes: name, age, and parents. If one of the attributes is changed, only the changed attribute needs to be synchronized. + +**Figure 3** Synchronization of distributed data objects + + +![distributedObject_syncView](figures/distributedObject_syncView.jpg) + + +### Object Persistence Mechanism + +Distributed data objects run in the process space of applications. When the data of a distributed data object is persisted in the distributed database, the data will not be lost after the application exits. + +You need to persist distributed data objects in the following scenarios: + +- Enable an application to retrieve the exact same data after it is opened again. In this case, you need to persist the distributed data object (for example, object 1). After the application is opened again, create a distributed data object (for example, object 2) and set the session ID of object 1 for object 2. Then, the application can retrieve the data of object 1. + +- Enable an application opened on another device to retrieve the exact same data. In this case, you need to persist the distributed data object (for example, object 1) on device A and synchronize the data to device B. Then, create a distributed data object (for example, object 2) and set the session ID of object 1 for object 2. When the application is opened on device B, it can retrieve the same application data used on device A before the application is closed. 
+ + +## Constraints + +- Data synchronization can be implemented across devices only for the applications with the same **bundleName**. + +- Data can be synchronized only for the distributed data objects with the same **sessionID** of the same application. + +- Each distributed data object occupies 100 KB to 150 KB of memory. Therefore, you are advised not to create too many distributed data objects. + +- The maximum size of a distributed data object is 500 KB. + +- It takes about 50 ms from the time when 1 KB of data starts to be modified on a device to the time when another device receives a data change notification. + +- A maximum of 16 distributed data object instances can be created for an application. + +- For optimal performance and user experience, the maximum number of devices for data collaboration is 3. + +- For the distributed data object of the complex type, only the root attribute can be modified. The subordinate attributes cannot be modified. + +- Only JS APIs are supported. + + +## Available APIs + +The following table lists the APIs for cross-device synchronization of distributed data objects. Most of the interfaces are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. For more information about the APIs, see [Distributed Data Object](../reference/apis/js-apis-data-distributedobject.md). + +| API| Description| +| -------- | -------- | +| create(context: Context, source: object): DataObject | Creates a distributed data object instance.| +| genSessionId(): string | Generates a session ID for distributed data objects.| +| setSessionId(sessionId: string, callback: AsyncCallback<void>): void | Sets a session ID for data synchronization. 
Automatic synchronization is performed for devices with the same session ID on a trusted network.| +| setSessionId(callback: AsyncCallback<void>): void | Exits all sessions.| +| on(type: 'change', callback: Callback<{ sessionId: string, fields: Array<string> }>): void | Subscribes to data changes of this distributed data object.| +| on(type: 'status', callback: Callback<{ sessionId: string, networkId: string, status: 'online' \| 'offline' }>): void | Subscribes to status changes of this distributed data object.| +| save(deviceId: string, callback: AsyncCallback<SaveSuccessResponse>): void | Saves a distributed data object.| +| revokeSave(callback: AsyncCallback<RevokeSaveSuccessResponse>): void | Revokes the save operation of the distributed data object.| + + +## How to Develop + +The following example demonstrates how to implement a distributed data object synchronization. + +1. Import the **@ohos.data.distributedDataObject** module. + + ```js + import distributedDataObject from '@ohos.data.distributedDataObject'; + ``` + +2. Request permissions. + + 1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + 2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). + +3. Creates a distributed data object instance. + + Stage model: + + ```js + // Import the module. + import distributedDataObject from '@ohos.data.distributedDataObject'; + import UIAbility from '@ohos.app.ability.UIAbility'; + + class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + // Create a distributed data object, which contains attributes of the string, number, boolean, and object types. 
+ let localObject = distributedDataObject.create(this.context, { + name: 'jack', + age: 18, + isVis: false, + parent: { mother: 'jack mom', father: 'jack Dad' }, + list: [{ mother: 'jack mom' }, { father: 'jack Dad' }] + }); + } + } + ``` + + FA model: + + + ```js + // Import the module. + import distributedDataObject from '@ohos.data.distributedDataObject'; + import featureAbility from '@ohos.ability.featureAbility'; + // Obtain the context. + let context = featureAbility.getContext(); + // Create a distributed data object, which contains attributes of the string, number, boolean, and object types. + let localObject = distributedDataObject.create(context, { + name: 'jack', + age: 18, + isVis: false, + parent: { mother: 'jack mom', father: 'jack Dad' }, + list: [{ mother: 'jack mom' }, { father: 'jack Dad' }] + }); + ``` + +4. Set the same session ID for the distributed data objects for data synchronization. The data objects in the synchronization network include the local and remote objects. + + ```js + // Set a session ID, for example, 123456, for device 1. + let sessionId = '123456'; + + localObject.setSessionId(sessionId); + + // Set the same session ID for device 2. + + // Create a distributed data object, which contains attributes of the string, number, boolean, and object types. + let remoteObject = distributedDataObject.create(this.context, { + name: undefined, + age: undefined, // undefined indicates that the data comes from the peer end. + isVis: true, + parent: undefined, + list: undefined + }); + // After learning that the device goes online, the remote object synchronizes data. That is, name changes to jack and age to 18. + remoteObject.setSessionId(sessionId); + ``` + +5. Observe data changes of a distributed data object. You can subscribe to data changes of the remote object. When the data in the remote object changes, a callback will be invoked to return a data change event. 
+ + ```js + function changeCallback(sessionId, changeData) { + console.info(`change: ${sessionId}`); + + if (changeData !== null && changeData !== undefined) { + changeData.forEach(element => { + console.info(`The element ${localObject[element]} changed.`); + }); + } + } + + // To refresh the page in changeCallback, correctly bind (this) to the changeCallback. + localObject.on("change", this.changeCallback.bind(this)); + ``` + +6. Modify attributes of the distributed data object. The object attributes support basic data types (number, Boolean, and string) and complex data types (array and nested basic types). + + ```js + localObject.name = 'jack1'; + localObject.age = 19; + localObject.isVis = false; + localObject.parent = { mother: 'jack1 mom', father: 'jack1 Dad' }; + localObject.list = [{ mother: 'jack1 mom' }, { father: 'jack1 Dad' }]; + ``` + + > **NOTE** + > + > For the distributed data object of the complex type, only the root attribute can be modified. The subordinate attributes cannot be modified. + + + ```js + // Supported modification. + localObject.parent = { mother: 'mom', father: 'dad' }; + // Modification not supported. + localObject.parent.mother = 'mom'; + ``` + +7. Access a distributed data object. Obtain the distributed data object attributes, which are the latest data on the network. + + ```js + console.info(`name:${localObject['name']}`); + ``` + +8. Unsubscribe from data changes. You can specify the callback to unregister. If you do not specify the callback, all data change callbacks of the distributed data object will be unregistered. + + ```js + // Unregister this.changeCallback. + localObject.off('change', this.changeCallback); + // Unregister all data change callbacks. + localObject.off('change'); + ``` + +9. Subscribes to status changes of a distributed data object. A callback will be invoked to report the status change when the target distributed data object goes online or offline. 
+ + ```js + function statusCallback(sessionId, networkId, status) { + // Service processing. + } + + localObject.on('status', this.statusCallback); + ``` + +10. Save a distributed data object and revoke the data saving operation. + + ```js + // Save the data object if the device on the network needs to retrieve the object data after the application exits. + localObject.save('local').then((result) => { + console.info(`Succeeded in saving. SessionId:${result.sessionId},version:${result.version},deviceId:${result.deviceId}`); + }).catch((err) => { + console.error(`Failed to save. Code:${err.code},message:${err.message}`); + }); + + // Revoke the save of a distributed data object. + localObject.revokeSave().then((result) => { + console.info(`Succeeded in revokeSaving. Session:${result.sessionId}`); + }).catch((err) => { + console.error(`Failed to revokeSave. Code:${err.code},message:${err.message}`); + }); + ``` + +11. Unsubscribe from the status changes of a distributed data object. You can specify the callback to unregister. If you do not specify the callback, this API unregisters all status change callbacks of this distributed data object. + + ```js + // Unregister this.statusCallback. + localObject.off('status', this.statusCallback); + // Unregister all status change callbacks. + localObject.off('status'); + ``` + +12. Remove a distributed data object from the synchronization network. The data of the removed distributed data object will not be synchronized to other devices. 
+ + ```js + localObject.setSessionId(() => { + console.info('leave all lession.'); + }); + ``` diff --git a/en/application-dev/database/data-sync-of-kv-store.md b/en/application-dev/database/data-sync-of-kv-store.md new file mode 100644 index 0000000000000000000000000000000000000000..e9b4fff51f15ca2339715c626c9e4e4bba7e4d45 --- /dev/null +++ b/en/application-dev/database/data-sync-of-kv-store.md @@ -0,0 +1,278 @@ +# Cross-Device Synchronization of KV Stores + + +## When to Use + +KV Stores are suitable for storing service data with simple relationships. It provides higher read and write performance than the SQL database. KV stores are widely used because the simplicity of the KV data model poses fewer database version compatibility issues in distributed scenarios and simplifies conflict handling in data synchronization. + + +## Basic Concepts + +Before implementing cross-device synchronization of KV stores, understand the following concepts: + + +### Single KV Store + +In a single KV store, data is saved in the unit of a single entry. When data is modified locally, the data entry is updated no matter whether it has been synchronized. Only one copy of data is retained globally for multiple devices. The data of the latest time is kept for the same entry (with the same primary code) of multiple devices. The data in single KV stores is not differentiated by device. If the data modified on multiple devices has the same key, the value will be overwritten. For the data written or modified locally, the data with the latest time is synchronized to other devices. Single KV stores are used to store information, such as the Contacts and weather application data. + +![singleKVStore](figures/singleKVStore.jpg) + + +### Device KV Store + +In a device KV store, the local device ID is added before the key of the KV pair stored by an application. In this way, the data of different devices is isolated. Data is managed by device and can be queried by device. 
+ +The underlying devices manage the data by device. The device KV stores support distributed data query by device, but do not support modification of the data synchronized from peer devices. Device KV stores are used to store the data that needs to be accessed by device, such as the Gallery thumbnails. + +![deviceKVStore](figures/deviceKVStore.jpg) + + +## Synchronization Types + +The **DatamgrService** provides the following synchronization types: + + +- Manual synchronization: The application calls **sync()** to trigger a synchronization. The list of devices to be synchronized and the synchronization mode must be specified. The synchronization mode can be **PULL_ONLY** (pulling remote data to the local end), **PUSH_ONLY** (pushing local data to the remote end), or **PUSH_PULL** (pushing local data to the remote end and pulling remote data to the local end). You can use the [**sync()** with the **query** parameter](../reference/apis/js-apis-distributedKVStore.md#sync-1) to synchronize the data that meets the specified conditions. The manual synchronization is available only for system applications. + +- Automatic synchronization: The distributed database automatically pushes local data to the remote end and pulls remote data to the local end. An automatic synchronization is triggered when a device goes online or an application updates data. + + +## Working Principles + +After completing device discovery and authentication, the underlying communication component notifies the application that the device goes online. The **DatamgrService** then establishes an encrypted transmission channel to synchronize data between the two devices. + + +### Cross-Device Data Synchronization Mechanism + +![kvStore](figures/kvStore.jpg) + +When **put()** or **delete()** is called successfully, an automatic synchronization is triggered. The distributed data is sent to the peer device through the communication adaptation layer for synchronization. 
+ +If **sync()** is called successfully, a manual synchronization is triggered to send distributed data to the peer device through the communication adaptation layer. + + +### Data Change Notification Mechanism + +When data is added, deleted, or modified, a notification is sent to the subscriber. The notifications can be classified into the following types: + +- Local data change notification: subscription of the application data changes on the local device. When the data in the local KV store is added, deleted, or modified in the database, a notification is received. + +- Distributed data change notification: subscription of the application data changes of other devices in the network. When the data in the local KV store changes after being synchronized with data from another device in the same network, a notification is received. + + +## Constraints + +- For each record in a device KV store, the key cannot exceed 896 bytes and the value cannot exceed 4 MB. + +- For each record in a single KV store, the key cannot exceed 1 KB and the value cannot exceed 4 MB. + +- The KV stores do not support custom conflict resolution policies for applications. + +- A maximum of 16 KV stores can be opened simultaneously for an application. + +- Each KV store supports a maximum of eight callbacks for subscription of data change notifications. + +- The manual synchronization is available only for system applications. + + +## Available APIs + +The following table lists the APIs for cross-device data synchronization of the single KV store. Most of the APIs are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. For more information about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). 
+ +| API| Description| +| -------- | -------- | +| createKVManager(config: KVManagerConfig): KVManager | Creates a **KvManager** instance to manage database objects.| +| getKVStore<T>(storeId: string, options: Options, callback: AsyncCallback<T>): void | Creates and obtains a KV store of the specified type.| +| put(key: string, value: Uint8Array\|string\|number\|boolean, callback: AsyncCallback<void>): void | Inserts and updates data.| +| on(event: 'dataChange', type: SubscribeType, listener: Callback<ChangeNotification>): void | Subscribes to data changes in the KV store.| +| get(key: string, callback: AsyncCallback<boolean \| string \| number \| Uint8Array>): void | Queries the value of the specified key.| +| sync(deviceIds: string[], mode: SyncMode, delayMs?: number): void | Triggers a manual synchronization of the KV store.| + + +## How to Develop + +The following uses a single KV store as an example to describe how to implement cross-device data synchronization. The development process is as follows. + +![kvStore_development_process](figures/kvStore_development_process.png) + +> **NOTE** +> +> The data on a device can be synchronized only to the devices whose data security labels are not higher than the security level of the device. For details, see [Access Control Mechanism in Cross-Device Synchronization](sync-app-data-across-devices-overview.md#access-control-mechanism-in-cross-device-synchronization). + +1. Import the module. + + ```js + import distributedKVStore from '@ohos.data.distributedKVStore'; + ``` + +2. Request permissions. + + 1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + 2. Display a dialog box to ask authorization from the user when the application is started for the first time. 
For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). + +3. Create a **KvManager** instance based on the specified **KvManagerConfig** object. + + 1. Create a **kvManagerConfig** object based on the application context. + 2. Create a **KvManager** instance. + + + ```js + // Obtain the context of the stage model. + import UIAbility from '@ohos.app.ability.UIAbility'; + let kvManager; + let context = null; + + class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + context = this.context; + } + } + + // Obtain the context of the FA model. + import featureAbility from '@ohos.ability.featureAbility'; + + let context = featureAbility.getContext(); + + // Construct a kvManager instance. + try { + const kvManagerConfig = { + bundleName: 'com.example.datamanagertest', + context: context + } + kvManager = distributedKVStore.createKVManager(kvManagerConfig); + console.info('Succeeded in creating KVManager.'); + // Create and obtain the KV store. + } catch (e) { + console.error(`Failed to create KVManager. Code:${e.code},message:${e.message}`); + } + ``` + +4. Obtain the KV store of the specified type. + + 1. Declare the ID of the distributed KV store to create. + 2. Disable the auto synchronization function (**autoSync:false**) to facilitate subsequent verification of the synchronization function. If synchronization is required, call the **sync()** interface. + + + ```js + try { + const options = { + createIfMissing: true, + encrypt: false, + backup: false, + autoSync: false, + // If kvStoreType is left empty, a device KV store is created by default. 
+ kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, + // Device KV store: kvStoreType: distributedKVStore.KVStoreType.DEVICE_COLLABORATION, + securityLevel: distributedKVStore.SecurityLevel.S1 + }; + kvManager.getKVStore('storeId', options, (err, kvStore) => { + if (err) { + console.error(`Failed to get KVStore: Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in getting KVStore.'); + // Perform related data operations. + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +5. Subscribe to changes of distributed data. + + ```js + try { + kvStore.on('dataChange', distributedKVStore.SubscribeType.SUBSCRIBE_TYPE_ALL, (data) => { + console.info(`dataChange callback call data: ${data}`); + }); + } catch (e) { + console.error(`An unexpected error occurred. code:${e.code},message:${e.message}`); + } + ``` + +6. Write data to the single KV store. + + 1. Construct the key and value to be written to the single KV store. + 2. Write KV pairs to the single KV store. + + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to put data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + }); + } catch (e) { + console.error(`An unexpected error occurred. Code:${e.code},message:${e.message}`); + } + ``` + +7. Query data in the single KV store. + + 1. Construct the key to be queried from the single KV store. + 2. Query data from the single KV store. + + + ```js + const KEY_TEST_STRING_ELEMENT = 'key_test_string'; + const VALUE_TEST_STRING_ELEMENT = 'value_test_string'; + try { + kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, (err) => { + if (err !== undefined) { + console.error(`Failed to put data. 
Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in putting data.'); + kvStore.get(KEY_TEST_STRING_ELEMENT, (err, data) => { + if (err != undefined) { + console.error(`Failed to get data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`Succeeded in getting data. Data:${data}`); + }); + }); + } catch (e) { + console.error(`Failed to get data. Code:${e.code},message:${e.message}`); + } + ``` + +8. Synchronize data to other devices. + + Select the devices to be synchronized with data and the synchronization mode. The user needs to confirm the synchronization mode when the application is started for the first time. + + > **NOTE** + > + > If manual synchronization is used, **deviceIds** is obtained by using [devManager.getTrustedDeviceListSync](../reference/apis/js-apis-device-manager.md#gettrusteddevicelistsync). The APIs of the **deviceManager** module are all system interfaces and available only to system applications. + + + ```js + import deviceManager from '@ohos.distributedHardware.deviceManager'; + + let devManager; + // create deviceManager + deviceManager.createDeviceManager('bundleName', (err, value) => { + if (!err) { + devManager = value; + // deviceIds is obtained by devManager.getTrustedDeviceListSync. + let deviceIds = []; + if (devManager !== null) { + // The ohos.permission.ACCESS_SERVICE_DM permission is required. This permission is available only for system applications. + let devices = devManager.getTrustedDeviceListSync(); + for (let i = 0; i < devices.length; i++) { + deviceIds[i] = devices[i].deviceId; + } + } + try { + // 1000 indicates the maximum delay, in ms. + kvStore.sync(deviceIds, distributedKVStore.SyncMode.PUSH_ONLY, 1000); + } catch (e) { + console.error(`An unexpected error occurred. 
+ Code:${e.code},message:${e.message}`); + } + } + }); + ``` diff --git a/en/application-dev/database/data-sync-of-rdb-store.md b/en/application-dev/database/data-sync-of-rdb-store.md new file mode 100644 index 0000000000000000000000000000000000000000..c47d95a7969d44a96396e2e6e77dd4c958468748 --- /dev/null +++ b/en/application-dev/database/data-sync-of-rdb-store.md @@ -0,0 +1,173 @@ +# Cross-Device Synchronization of RDB Stores + + +## When to Use + +When creating a data table, you can set the table to support cross-device access. You can also use APIs to move the data to be accessed across devices to a distributed data table. + + +## Basic Concepts + +OpenHarmony supports synchronization of the relational data of an application across multiple devices. + +- Distributed table list<br>
After a table is created for an application in an RDB store, you can set it as a distributed table. When querying the RDB store of a remote device, you can obtain the distributed table name of the remote device based on the local table name. + +- Synchronization mode
Data can be synchronized between devices in either of the following ways:
- Pushing data from a local device to a remote device.
- Pulling data from a remote device to a local device. + + +## Working Principles + +After completing device discovery and authentication, the underlying communication component notifies the application that the device goes online. The **DatamgrService** then establishes an encrypted transmission channel to synchronize data between the two devices. + + +### Cross-Device Data Synchronization Mechanism + +![relationalStore_sync](figures/relationalStore_sync.jpg) + +After writing data to an RDB store, the service sends a synchronization request to the **DatamgrService**. + +The **DatamgrService** reads the data to be synchronized from the application sandbox and sends the data to the **DatamgrService** of the target device based on the **deviceId** of the peer device. Then, the **DatamgrService** writes the data to the RDB of the same application. + + +### Data Change Notification Mechanism + +When data is added, deleted, or modified, a notification is sent to the subscriber. The notifications can be classified into the following types: + +- Local data change notification: subscription of the application data changes on the local device. When the data in the local KV store is added, deleted, or modified in the database, a notification is received. + +- Distributed data change notification: subscription of the application data changes of other devices in the network. When the data in the local RDB store changes after being synchronized with data from another device in the same network, a notification is received. + + +## Constraints + +- A maximum of 16 RDB stores can be opened simultaneously for an application. + +- Each RDB store supports a maximum of eight callbacks for subscription of data change notifications. + +- Third-party applications cannot call the distributed APIs that must be specified with the device. + + +## Available APIs + +The following table lists the APIs for cross-device data synchronization of RDB stores. 
Most of the APIs are executed asynchronously, using a callback or promise to return the result. The following table uses the callback-based APIs as an example. For more information about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). + +| API| Description| +| -------- | -------- | +| setDistributedTables(tables: Array<string>, callback: AsyncCallback<void>): void | Sets the distributed tables to be synchronized.| +| sync(mode: SyncMode, predicates: RdbPredicates, callback: AsyncCallback<Array<[string, number]>>): void | Synchronizes data across devices.| +| on(event: 'dataChange', type: SubscribeType, observer: Callback<Array<string>>): void | Subscribes to changes in the distributed data.| +| off(event:'dataChange', type: SubscribeType, observer: Callback<Array<string>>): void | Unsubscribes from changes in the distributed data.| +| obtainDistributedTableName(device: string, table: string, callback: AsyncCallback<string>): void; | Obtains the table name on the specified device based on the local table name.| +| remoteQuery(device: string, table: string, predicates: RdbPredicates, columns: Array<string> , callback: AsyncCallback<ResultSet>): void | Queries data from the RDB store of a remote device based on specified conditions.| + + +## How to Develop + +> **NOTE** +> +> The data on a device can be synchronized only to the devices whose data security labels are not higher than the security level of the device. For details, see [Access Control Mechanism in Cross-Device Synchronization](sync-app-data-across-devices-overview.md#access-control-mechanism-in-cross-device-synchronization). + +1. Import the module. + + ```js + import relationalStore from '@ohos.data.relationalStore'; + ``` + +2. Request permissions. + + 1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). 
+ 2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization). + +3. Create an RDB store and set a table for distributed synchronization. + + ```js + const STORE_CONFIG = { + name: 'RdbTest.db', // Database file name. + securityLevel: relationalStore.SecurityLevel.S1 // Database security level. + }; + relationalStore.getRdbStore(this.context, STORE_CONFIG, (err, store) => { + store.executeSql('CREATE TABLE IF NOT EXISTS EMPLOYEE (ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME TEXT NOT NULL, AGE INTEGER, SALARY REAL, CODES BLOB)', null, (err) => { + // Set the table for distributed synchronization. + store.setDistributedTables(['EMPLOYEE']); + // Perform related operations. + }) + }) + ``` + +4. Synchronize data across devices. After **sync()** is called to trigger a synchronization, data is synchronized from the local device to all other devices on the network. + + ```js + // Construct the predicate object for synchronizing the distributed table. + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); + // Call sync() to synchronize data. + store.sync(relationalStore.SyncMode.SYNC_MODE_PUSH, predicates, (err, result) => { + // Check whether data synchronization is successful. + if (err) { + console.error(`Failed to sync data. Code:${err.code},message:${err.message}`); + return; + } + console.info('Succeeded in syncing data.'); + for (let i = 0; i < result.length; i++) { + console.info(`device:${result[i][0]},status:${result[i][1]}`); + } + }) + ``` + +5. Subscribe to changes in the distributed data. The data synchronization triggers the **observer** callback registered in **on()**. The input parameter of the callback is the ID of the device whose data changes. 
+ + ```js + let observer = function storeObserver(devices) { + for (let i = 0; i < devices.length; i++) { + console.info(`The data of device:${devices[i]} has been changed.`); + } + } + + try { + // Register an observer to listen for the changes of the distributed data. + // When data in the RDB store changes, the registered callback will be invoked to return the data changes. + store.on('dataChange', relationalStore.SubscribeType.SUBSCRIBE_TYPE_REMOTE, observer); + } catch (err) { + console.error('Failed to register observer. Code:${err.code},message:${err.message}'); + } + + // You can unsubscribe from the data changes if required. + try { + store.off('dataChange', relationalStore.SubscribeType.SUBSCRIBE_TYPE_REMOTE, observer); + } catch (err) { + console.error('Failed to register observer. Code:${err.code},message:${err.message}'); + } + ``` + +6. Query data across devices. If data synchronization is not complete or triggered, an application can call **remoteQuery()** to query data from a remote device. + + > **NOTE** + > + > **deviceIds** is obtained by using [devManager.getTrustedDeviceListSync](../reference/apis/js-apis-device-manager.md#gettrusteddevicelistsync). The APIs of the **deviceManager** module are all system interfaces and available only to system applications. + + + ```js + // Obtain device IDs. + import deviceManager from '@ohos.distributedHardware.deviceManager'; + + deviceManager.createDeviceManager("com.example.appdatamgrverify", (err, manager) => { + if (err) { + console.info(`Failed to create device manager. Code:${err.code},message:${err.message}`); + return; + } + let devices = manager.getTrustedDeviceListSync(); + let deviceId = devices[0].deviceId; + + // Construct a predicate object for querying the distributed table. + let predicates = new relationalStore.RdbPredicates('EMPLOYEE'); + // Query data from the specified remote device and return the query result. 
+ store.remoteQuery(deviceId, 'EMPLOYEE', predicates, ['ID', 'NAME', 'AGE', 'SALARY', 'CODES'], + function (err, resultSet) { + if (err) { + console.error(`Failed to remoteQuery data. Code:${err.code},message:${err.message}`); + return; + } + console.info(`ResultSet column names: ${resultSet.columnNames}, column count: ${resultSet.columnCount}`); + } + ) + }) + ``` diff --git a/en/application-dev/database/database-datashare-guidelines.md b/en/application-dev/database/database-datashare-guidelines.md deleted file mode 100644 index 580811158051b5b6d5d2137f4b14654a46e891b9..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-datashare-guidelines.md +++ /dev/null @@ -1,263 +0,0 @@ -# DataShare Development -The **DataShare** module allows an application to manage its own data and share data with other applications. Currently, data can be shared only between applications on the same device. - -## Available APIs - -**Table 1** APIs of the data provider - -|API|Description| -|:------|:------| -|onCreate?(want: Want, callback: AsyncCallback<void>): void|Called to initialize service logic when the data provider application is created, for example, when a database is created.| -|insert?(uri: string, valueBucket: ValuesBucket, callback: AsyncCallback<number>): void|Inserts data into the database.| -|update?(uri: string, predicates: DataSharePredicates, valueBucket: ValuesBucket, callback: AsyncCallback<number>): void|Updates data in the database.| -|query?(uri: string, predicates: DataSharePredicates, columns: Array<string>, callback: AsyncCallback<Object>): void|Queries data from the database.| -|delete?(uri: string, predicates: DataSharePredicates, callback: AsyncCallback<number>): void|Deletes data from the database.| - -For details about the data provider APIs, see [DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md). 
- -**Table 2** APIs of the data consumer - -| API | Description | -| :----------------------------------------------------------- | :--------------------------------- | -| createDataShareHelper(context: Context, uri: string, callback: AsyncCallback<DataShareHelper>): void | Creates a **DataShareHelper** instance. | -| insert(uri: string, value: ValuesBucket, callback: AsyncCallback<number>): void | Inserts a single data record into the database. | -| update(uri: string, predicates: DataSharePredicates, value: ValuesBucket, callback: AsyncCallback<number>): void | Updates data in the database. | -| query(uri: string, predicates: DataSharePredicates, columns: Array<string>, callback: AsyncCallback<DataShareResultSet>): void | Queries data from the database. | -| delete(uri: string, predicates: DataSharePredicates, callback: AsyncCallback<number>): void | Deletes one or more data records from the database.| - -For more information, see [DataShareHelper](../reference/apis/js-apis-data-dataShare.md). - -## When to Use - -There are two roles in **DataShare**: - -- Data provider: adds, deletes, modifies, and queries data, opens files, and shares data. -- Data consumer: accesses the data provided by the provider using **DataShareHelper**. - -### Data Provider Application Development (for System Applications Only) - -[DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md) provides the following APIs. You can override these APIs as required. - -- **onCreate** - - Called by the server to initialize service logic when the **DataShare** client connects to the **DataShareExtensionAbility** server. - -- **insert** - - Inserts data. This API is called when the client requests to insert data. - -- **update** - - Updates data. This API is called when the client requests to update data. - -- **delete** - - Deletes data. This API is called when the client requests to delete data. - -- **query** - - Queries data. 
This API is called when the client requests to query data. - -- **batchInsert** - - Batch inserts data. This API is called when the client requests to batch insert data. - -- **normalizeUri** - - Converts the URI provided by the client to the URI used by the server. - -- **denormalizeUri** - - Converts the URI used by the server to the initial URI passed by the client. - -Before implementing a **DataShare** service, create a **DataShareExtensionAbility** object in the DevEco Studio project as follows: - -1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **DataShareAbility**. - -2. Right-click the **DataShareAbility** directory, and choose **New > TypeScript File** to create a file named **DataShareAbility.ts**. - -3. In the **DataShareAbility.ts** file, import **DataShareExtensionAbility** and other dependencies. - - ```ts - import Extension from '@ohos.application.DataShareExtensionAbility'; - import rdb from '@ohos.data.relationalStore'; - import fileIo from '@ohos.fileio'; - import dataSharePredicates from '@ohos.data.dataSharePredicates'; - ``` - -4. Override **DataShareExtensionAbility** APIs based on actual requirements. For example, if the data provider provides only data query, override only **query()**. - -5. Implement the data provider services. For example, implement data storage of the data provider by using a database, reading and writing files, or accessing the network. - - ```ts - const DB_NAME = "DB00.db"; - const TBL_NAME = "TBL00"; - const DDL_TBL_CREATE = "CREATE TABLE IF NOT EXISTS " - + TBL_NAME - + " (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER, isStudent BOOLEAN, Binary BINARY)"; - - let rdbStore; - let result; - - export default class DataShareExtAbility extends Extension { - private rdbStore_; - - // Override onCreate(). - onCreate(want, callback) { - result = this.context.cacheDir + '/datashare.txt'; - // Create an RDB store. 
- rdb.getRdbStore(this.context, { - name: DB_NAME, - securityLevel: rdb.SecurityLevel.S1 - }, function (err, data) { - rdbStore = data; - rdbStore.executeSql(DDL_TBL_CREATE, [], function (err) { - console.log('DataShareExtAbility onCreate, executeSql done err:' + JSON.stringify(err)); - }); - if (callback) { - callback(); - } - }); - } - - // Override query(). - query(uri, predicates, columns, callback) { - if (predicates == null || predicates == undefined) { - console.info('invalid predicates'); - } - try { - rdbStore.query(TBL_NAME, predicates, columns, function (err, resultSet) { - if (resultSet != undefined) { - console.info('resultSet.rowCount: ' + resultSet.rowCount); - } - if (callback != undefined) { - callback(err, resultSet); - } - }); - } catch (err) { - console.error('error' + err); - } - } - // Override other APIs as required. - // ... - }; - ``` - -6. Define **DataShareExtensionAbility** in **module.json5**. - - | Field | Description | Mandatory | - | ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | "name" | Ability name, corresponding to the **ExtensionAbility** class name derived from **Ability**. | Yes | - | "type" | Ability type. The value is **dataShare**, indicating the development is based on the **datashare** template. | Yes | - | "uri" | URI used for communication. It is the unique identifier for the data consumer to connect to the provider. | Yes | - | "visible" | Whether it is visible to other applications. Data sharing is allowed only when the value is **true**. | Yes | - | "metadata" | Configuration for silent access, including the **name** and **resource** fields.
The **name** field identifies the configuration, which has a fixed value of **ohos.extension.dataShare**.
The **resource** field has a fixed value of **$profile:data_share_config**, which indicates that the profile name is **data_share_config.json**. | **metadata** is mandatory when the ability launch type is **singleton**. For details about the ability launch type, see **launchType** in the [Internal Structure of the abilities Attribute](../quick-start/module-structure.md#internal-structure-of-the-abilities-attribute). | - - **module.json5 example** - - ```json - "extensionAbilities": [ - { - "srcEntrance": "./ets/DataShareExtAbility/DataShareExtAbility.ts", - "name": "DataShareExtAbility", - "icon": "$media:icon", - "description": "$string:description_datashareextability", - "type": "dataShare", - "uri": "datashare://com.samples.datasharetest.DataShare", - "visible": true, - "metadata": [{"name": "ohos.extension.dataShare", "resource": "$profile:data_share_config"}] - } - ] - ``` - - **data_share_config.json Description** - - | Field | Description | Mandatory | - | ----------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | "tableConfig" | Label configuration. | Yes | - | "uri" | Range for which the configuration takes effect. The URI supports the following formats in descending order by priority:
- **\***: indicates all databases and tables.
- **datashare:///{bundleName\}/{moduleName\}/{storeName\}**: specifies a database.
- **datashare:///{bundleName\}/{moduleName\}/{storeName\}/{tableName\}**: specifies a table.
If URIs of different formats are configured, only the URI with higher priority takes effect. | Yes | - | "crossUserMode" | Whether data is shared by multiple users. The value **1** means to share data between multiple users, and the value **2** means the opposite. | **crossUserMode** is mandatory when the ability launch type is **singleton**. For details about the ability launch type, see **launchType** in the [Internal Structure of the abilities Attribute](../quick-start/module-structure.md#internal-structure-of-the-abilities-attribute). | - | "writePermission" | Write permission required for silent access. | No | - | "readPermission" | Read permission required for silent access. | No | - - **data_share_config.json Example** - - ```json - "tableConfig": [ - { - "uri": "*", - "writePermission": "ohos.permission.xxx" - }, - { - "uri": "datashare:///com.acts.datasharetest/entry/DB00", - "crossUserMode": 1, - "writePermission": "ohos.permission.xxx", - "readPermission": "ohos.permission.xxx" - }, - { - "uri": "datashare:///com.acts.datasharetest/entry/DB00/TBL00", - "crossUserMode": 2 - } - ] - ``` - - - -### Data Consumer Application Development - -1. Import dependencies. - - ```ts - import UIAbility from '@ohos.app.ability.UIAbility'; - import dataShare from '@ohos.data.dataShare'; - import dataSharePredicates from '@ohos.data.dataSharePredicates'; - ``` - -2. Define the URI string for communicating with the data provider. - - ```ts - // Different from the URI defined in the module.json5 file, the URI passed in the parameter has an extra slash (/), because there is a DeviceID parameter between the second and the third slash (/). - let dseUri = ('datashare:///com.samples.datasharetest.DataShare'); - ``` - -3. Create a **DataShareHelper** instance. 
- - ```ts - let dsHelper; - let abilityContext; - - export default class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - abilityContext = this.context; - dataShare.createDataShareHelper(abilityContext, dseUri, (err, data)=>{ - dsHelper = data; - }); - } - } - ``` - -4. Use the APIs provided by **DataShareHelper** to access the services provided by the provider, for example, adding, deleting, modifying, and querying data. - - ```ts - // Construct a piece of data. - let valuesBucket = { "name": "ZhangSan", "age": 21, "isStudent": false, "Binary": new Uint8Array([1, 2, 3]) }; - let updateBucket = { "name": "LiSi", "age": 18, "isStudent": true, "Binary": new Uint8Array([1, 2, 3]) }; - let predicates = new dataSharePredicates.DataSharePredicates(); - let valArray = ['*']; - // Insert a piece of data. - dsHelper.insert(dseUri, valuesBucket, (err, data) => { - console.log('dsHelper insert result: ' + data); - }); - // Update data. - dsHelper.update(dseUri, predicates, updateBucket, (err, data) => { - console.log('dsHelper update result: ' + data); - }); - // Query data. - dsHelper.query(dseUri, predicates, valArray, (err, data) => { - console.log('dsHelper query result: ' + data); - }); - // Delete data. - dsHelper.delete(dseUri, predicates, (err, data) => { - console.log('dsHelper delete result: ' + data); - }); - ``` diff --git a/en/application-dev/database/database-datashare-overview.md b/en/application-dev/database/database-datashare-overview.md deleted file mode 100644 index 53fbd723922ea602e694e0844d2486b13f48538c..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-datashare-overview.md +++ /dev/null @@ -1,56 +0,0 @@ -# DataShare Overview - -## Introduction - -The **DataShare** module allows an application to manage its own data and share data with other applications. Currently, data can be shared only between applications on the same device. 
- -**DataShare** must be used together with [DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md). - -Data needs to be shared in a wealth of scenarios. For example, contacts, short message service (SMS), and media gallery always needs to be shared. However, certain data, such as accounts and passwords, cannot be shared. Some data, such as SMS messages, can be queried but not modified by other applications. **DataShare** provides a secure data sharing mechanism for applications in a variety of scenarios. - -The data provider can directly use the **DataShare** framework to share data with other applications without complex encapsulation. The data consumer only needs to learn and use a set of interfaces because the data access mode does not vary with the data provisioning mode. This greatly reduces the learning time and development difficulty. - -## Basic Concepts - - -Before you get started, familiarize yourself with the following concepts: - - -- Data provider - - The **DataShareExtensionAbility** based on the stage model implements functions, such as selectively adding, deleting, modifying, and querying data, and opening files. It implements services related to cross-application data sharing. - -- Data consumer - - The data consumer uses **DataShareHelper**, a utility class created by [createDataShareHelper()](../reference/apis/js-apis-data-dataShare.md#datasharecreatedatasharehelper), to access the data provided by the data provider. - -- **ValuesBucket** - - One or more data records stored in the form of key-value (KV) pairs. The keys are of the string type. The values can be of the number, string, Boolean, or Unit8Array type. - -- Result set - - A collection of query results. Flexible data access modes are provided for users to obtain data. - -- Predicate - - Conditions specified for updating, deleting, or querying data in the database. 
- -## Working Principles - -**Figure 1** DataShare mechanism - - -![](figures/en_DataShare.png) - -- The **DataShareExtAbility** module, as the data provider, implements services related to data sharing between applications. -- The **DataShareHelper** module, as the data consumer, provides interfaces for accessing data, including adding, deleting, modifying, and querying data. -- The data consumer communicates with the data provider using inter-process communication (IPC). The data provider can be implemented through a database or other data storage. - -- The **ResultSet** module is implemented through shared memory. Shared memory stores the result sets, and interfaces are provided to traverse result sets. - -## Constraints - -- **DataShare** is subject to the limitations on the database used by the data provider. For example, the supported data models, length of the keys and values, and maximum number of databases that can be accessed at a time by each application vary with the database in use. - -- The payloads of **ValuesBucket**, predicates, and result sets are restricted by IPC. diff --git a/en/application-dev/database/database-distributedobject-guidelines.md b/en/application-dev/database/database-distributedobject-guidelines.md deleted file mode 100644 index dcbc34b48912020e0a7c6e0c987ce5de1d0b75c8..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-distributedobject-guidelines.md +++ /dev/null @@ -1,282 +0,0 @@ -# Distributed Data Object Development - -## When to Use - -The **distributedDataObject** module provides APIs to implement data collaboration of the same application across multiple devices. In addition, the devices that form a Super Device can listen for object status and data changes with each other. - -For example, when the data of a distributed data object is added, deleted, or modified for application A on device 1, application A on device 2 can obtain the updated data. 
In addition, device 2 can listen for data changes and online/offline of the data objects on device 1. - -## Available APIs - -For details about the APIs, see [Distributed Data Object](../reference/apis/js-apis-data-distributedobject.md). - -### Creating a Distributed Data Object Instance - -Call **createDistributedObject()** to create a distributed data object instance. You can specify the attributes of the instance in **source**. - - -**Table 1** API for creating a distributed data object instance - -| Bundle Name| API| Description| -| -------- | -------- | -------- | -| ohos.data.distributedDataObject| createDistributedObject(source: object): DistributedObject | Creates a distributed data object instance for data operations.
- **source**: attributes of the distributed data object to create.
- **DistributedObject**: returns the distributed data object created.| - -### Generating a Session ID - -Call **genSessionId()** to generate a session ID randomly. The generated session ID can be used to set the session ID of a distributed data object. - -**Table 2** API for generating a session ID randomly - -| Bundle Name| API| Description| -| -------- | -------- | -------- | -| ohos.data.distributedDataObject| genSessionId(): string | Generates a session ID, which can be used as the session ID of a distributed data object.| - -### Setting a Session ID for a Distributed Data Object - -Call **setSessionId()** to set a session ID for a distributed data object. The session ID is a unique identifier for one collaboration across devices. The distributed data objects to be synchronized must be associated with the same session ID. - -**Table 3** API for setting a session ID - -| Class| API| Description| -| -------- | -------- | -------- | -| DistributedDataObject | setSessionId(sessionId?: string): boolean | Sets a session ID for this distributed data object.
**sessionId**: ID of the distributed data object on a trusted network. To remove a distributed data object from the network, set this parameter to "" or leave it empty.| - -### Observing Data Changes - -Call **on()** to subscribe to data changes of a distributed data object. When the data changes, a callback will be invoked to return the data changes. You can use **off()** to unsubscribe from the data changes. - -**Table 4** APIs for observing data changes of a distributed data object - -| Class| API| Description| -| -------- | -------- | -------- | -| DistributedDataObject| on(type: 'change', callback: Callback<{ sessionId: string, fields: Array<string> }>): void | Subscribes to data changes.| -| DistributedDataObject| off(type: 'change', callback?: Callback<{ sessionId: string, fields: Array<string> }>): void | Unsubscribes from data changes.
**Callback**: callback to unregister. If this parameter is not specified, all data changes of this distributed data object will be unsubscribed from. | - -### Observing Online or Offline Status - -Call **on()** to subscribe to status changes of a distributed data object. The status can be online or offline. When the status changes, a callback will be invoked to return the status. You can use **off()** to unsubscribe from the status changes. - -**Table 5** APIs for observing status changes of a distributed data object - -| Class| API| Description| -| -------- | -------- | -------- | -| DistributedDataObject| on(type: 'status', callback: Callback<{ sessionId: string, networkId: string, status: 'online' \| 'offline' }>): void | Subscribes to the status changes of a distributed data object.| -| DistributedDataObject| off(type: 'status', callback?: Callback<{ sessionId: string, deviceId: string, status: 'online' \| 'offline' }>): void | Unsubscribes from status changes of a distributed data object.| - -### Saving or Deleting a Distributed Data Object - -Call **save()** to save a distributed data object. When the application is active, the saved data will not be released. When the application exits and restarts, the data saved on the device will be restored. - -Call **revokeSave()** to delete a distributed data object that is no longer required. If the distributed data object is saved on the local device, **revokeSave()** will delete the data from all trusted devices. If the distributed data object is not saved on the local device, **revokeSave()** will delete the data from the local device. - -The saved data will be released in the following cases: - -- The data is stored for more than 24 hours. -- The application has been uninstalled. -- Data is successfully restored. 
- -**Table 6** APIs for saving and deleting a distributed data object - -| Class| API| Description| -| -------- | -------- | -------- | -| DistributedDataObject | save(deviceId: string): Promise<SaveSuccessResponse> | Saves a distributed data object.| -| DistributedDataObject| revokeSave(): Promise<RevokeSaveSuccessResponse> | Deletes a distributed data object. | - -## How to Develop - -The following example shows how to implement distributed data object synchronization. - -1. Import the @ohos.data.distributedDataObject module to the development environment. - - ```js - import distributedObject from '@ohos.data.distributedDataObject'; - ``` -2. Apply for the permission. - - Add the required permission (FA model) to the **config.json** file. - - ```json - { - "module": { - "reqPermissions": [ - { - "name": "ohos.permission.DISTRIBUTED_DATASYNC" - } - ] - } - } - ``` - For the apps based on the stage model, see [Declaring Permissions](../security/accesstoken-guidelines.md#stage-model). - - This permission must also be granted by the user when the application is started for the first time. 
- - ```js - // FA model - import featureAbility from '@ohos.ability.featureAbility'; - - function grantPermission() { - console.info('grantPermission'); - let context = featureAbility.getContext(); - context.requestPermissionsFromUser(['ohos.permission.DISTRIBUTED_DATASYNC'], 666, function (result) { - console.info(`requestPermissionsFromUser CallBack`); - - }) - console.info('end grantPermission'); - } - - grantPermission(); - ``` - - ```ts - // Stage model - import UIAbility from '@ohos.app.ability.UIAbility'; - - let context = null; - - class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - context = this.context; - } - } - - function grantPermission() { - let permissions = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.info('success: ${data}'); - }).catch((error) => { - console.error('failed: ${error}'); - }); - } - - grantPermission(); - ``` - -3. Obtain a distributed data object instance. - - ```js - let localObject = distributedObject.createDistributedObject({ - name: undefined, - age: undefined, - isVis: true, - parent: undefined, - list: undefined - }); - let sessionId = distributedObject.genSessionId(); - ``` - -4. Add the distributed data object instance to a network for data synchronization. The data objects in the synchronization network include the local and remote objects. - - ```js - // Local object - let localObject = distributedObject.createDistributedObject({ - name: "jack", - age: 18, - isVis: true, - parent: { mother: "jack mom", father: "jack Dad" }, - list: [{ mother: "jack mom" }, { father: "jack Dad" }] - }); - localObject.setSessionId(sessionId); - - // Remote object - let remoteObject = distributedObject.createDistributedObject({ - name: undefined, - age: undefined, - isVis: true, - parent: undefined, - list: undefined - }); - // After learning that the local device goes online, the remote object synchronizes data. 
That is, name changes to jack and age to 18. - remoteObject.setSessionId(sessionId); - ``` - -5. Observe the data changes of the distributed data object. You can subscribe to data changes of the remote object. When the data in the remote object changes, a callback will be invoked to return the data changes. - - ```js - function changeCallback(sessionId, changeData) { - console.info("change" + sessionId); - - if (changeData != null && changeData != undefined) { - changeData.forEach(element => { - console.info("changed !" + element + " " + localObject[element]); - }); - } - } - - // To refresh the page in changeCallback, correctly bind (this) to the changeCallback. - localObject.on("change", this.changeCallback.bind(this)); - ``` - -6. Modify attributes of the distributed data object. The object attributes support basic data types (such as number, Boolean, and string) and complex data types (array and nested basic types). - - ```js - localObject.name = "jack"; - localObject.age = 19; - localObject.isVis = false; - localObject.parent = { mother: "jack mom", father: "jack Dad" }; - localObject.list = [{ mother: "jack mom" }, { father: "jack Dad" }]; - ``` - - > **NOTE**
- > For the distributed data object of the complex type, only the root attribute can be modified. The subordinate attributes cannot be modified. - - ```js - // Supported modification. - localObject.parent = { mother: "mom", father: "dad" }; - // Modification not supported. - localObject.parent.mother = "mom"; - ``` - -7. Access the distributed data object.
Obtain the distributed data object attributes, which are the latest data on the network. - - ```js - console.info("name " + localObject["name"]); - ``` -8. Unsubscribe from data changes. You can specify the callback to unregister. If you do not specify the callback, all data change callbacks of the distributed data object will be unregistered. - - ```js - // Unregister the specified data change callback. - localObject.off("change", changeCallback); - // Unregister all data change callbacks. - localObject.off("change"); - ``` -9. Subscribe to status changes of this distributed data object. A callback will be invoked to report the status change when the target distributed data object goes online or offline. - - ```js - function statusCallback(sessionId, networkId, status) { - this.response += "status changed " + sessionId + " " + status + " " + networkId; - } - - localObject.on("status", this.statusCallback); - ``` - -10. Save a distributed data object and delete it. - - ```js - // Save a distributed data object. - localObject.save("local").then((result) => { - console.info("save sessionId " + result.sessionId); - console.info("save version " + result.version); - console.info("save deviceId " + result.deviceId); - }, (result) => { - console.info("save local failed."); - }); - // Revoke the data saving operation. - localObject.revokeSave().then((result) => { - console.info("revokeSave success."); - }, (result) => { - console.info("revokeSave failed."); - }); - ``` -11. Unsubscribe from the status changes of this distributed data object. You can specify the callback to unregister. If you do not specify the callback, this API unregisters all status change callbacks of this distributed data object. - - ```js - // Unregister the specified status change callback. - localObject.off("status", this.statusCallback); - // Unregister all status change callbacks. - localObject.off("status"); - ``` -12. Remove the distributed data object from the synchronization network. 
The data changes on the local object will not be synchronized to the removed distributed data object. - - ```js - localObject.setSessionId(""); - ``` diff --git a/en/application-dev/database/database-distributedobject-overview.md b/en/application-dev/database/database-distributedobject-overview.md deleted file mode 100644 index 9fb93eba7c13f85da0cdccb9036df26e4d8d8ce0..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-distributedobject-overview.md +++ /dev/null @@ -1,45 +0,0 @@ -# Distributed Data Object Overview - -The distributed data object management framework provides object-oriented in-memory data management. It provides basic data management capabilities, such as creating, querying, deleting, and modifying distributed data objects, and observing data and status changes of the distributed data objects. This management framework also provides distributed capabilities to implement data object collaboration for the same application between multiple devices that form a Super Device. - - -## Basic Concepts - -- **Distributed in-memory database** - - The distributed in-memory database caches data in the memory so that applications can quickly access data. This database, however, does not store data persistently. If the database is closed, the data is not retained. - - -- **Distributed data object** - - A distributed data object is an encapsulation of the JS object type. Each distributed data object instance creates a data table in the in-memory database. The in-memory databases created for different applications are isolated from each other. Reading data from and writing data to a distributed data object are mapped to the **get** and **put** operations in the corresponding database, respectively. - - The distributed data object can be in the following states in its lifecycle: - - - **Uninitialized**: The distributed data object is not instantiated or has been destroyed. 
- - **Local**: The data table is created, but the data cannot be synchronized. - - **Distributed**: The data table is created, and there are at least two online devices with the same session ID. In this case, data can be synchronized across devices. If a device is offline or the session ID is empty, the distributed data object changes to the local state. - - -## Working Principles - -The distributed data objects are encapsulated into JS objects in distributed in-memory databases. This allows the distributed data objects to be operated in the same way as local variables. The system automatically implements cross-device data synchronization. - -**Figure 1** Working mechanism - -![how-distributedobject-works](figures/how-distributedobject-works.png) - - - - -## Constraints - -- Data synchronization can be implemented across devices only for the applications with the same **bundleName**. - -- Each distributed data object occupies 100 KB to 150 KB of memory. Therefore, you are advised not to create too many distributed data objects. - -- The maximum size of a distributed data object is 500 KB. - -- For the distributed data object of the complex type, only the root attribute can be modified. The subordinate attributes cannot be modified. - -- Only JS APIs are supported. diff --git a/en/application-dev/database/database-mdds-guidelines.md b/en/application-dev/database/database-mdds-guidelines.md deleted file mode 100644 index 70c0ee209975ff3322210041e123afbeec3b5e6f..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-mdds-guidelines.md +++ /dev/null @@ -1,263 +0,0 @@ -# Distributed Data Service Development - -## When to Use - -The Distributed Data Service (DDS) implements synchronization of application data across user devices. When data is added, deleted, or modified for an application on a device, the same application on another device can obtain the updated data. 
The DDS applies to the distributed gallery, messages, contacts, and file manager. - - -## Available APIs - -For details about the APIs, see [Distributed KV Store](../reference/apis/js-apis-distributedKVStore.md). - -**Table 1** APIs provided by the DDS - -| API | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | -| createKVManager(config: KVManagerConfig): KVManager | Creates a **KvManager** object for database management. | -| getKVStore<T extends KVStore>(storeId: string, options: Options, callback: AsyncCallback<T>): void
getKVStore<T extends KVStore>(storeId: string, options: Options): Promise<T> | Creates and obtains a KV store.| -| put(key: string, value: Uint8Array\|string\|number\|boolean, callback: AsyncCallback<void>): void
put(key: string, value: Uint8Array\|string\|number\|boolean): Promise<void> | Inserts and updates data. | -| delete(key: string, callback: AsyncCallback<void>): void
delete(key: string): Promise<void> | Deletes data. | -| get(key: string, callback: AsyncCallback<Uint8Array\|string\|boolean\|number>): void
get(key: string): Promise<Uint8Array\|string\|boolean\|number> | Obtains data. | -| on(event: 'dataChange', type: SubscribeType, observer: Callback<ChangeNotification>): void
on(event: 'syncComplete', syncCallback: Callback<Array<[string,number]>>): void | Subscribes to data changes in the KV store. | -| sync(deviceIdList: string[], mode: SyncMode, delayMs?: number): void | Triggers database synchronization in manual mode. | - -## How to Develop - -The following uses a single KV store as an example to describe the development procedure. - -1. Import the distributed data module. - - ```js - import distributedKVStore from '@ohos.data.distributedKVStore'; - ``` - -2. Apply for the required permission if data synchronization is required. - - Add the permission required (FA model) in the **config.json** file. The sample code is as follows: - - ```json - { - "module": { - "reqPermissions": [ - { - "name": "ohos.permission.DISTRIBUTED_DATASYNC" - } - ] - } - } - ``` - - For the apps based on the stage model, see [Declaring Permissions](../security/accesstoken-guidelines.md#stage-model). - - This permission must also be granted by the user when the application is started for the first time. 
The sample code is as follows: - - ```js - // FA model - import featureAbility from '@ohos.ability.featureAbility'; - - function grantPermission() { - console.info('grantPermission'); - let context = featureAbility.getContext(); - context.requestPermissionsFromUser(['ohos.permission.DISTRIBUTED_DATASYNC'], 666).then((data) => { - console.info('success: ${data}'); - }).catch((error) => { - console.error('failed: ${error}'); - }) - } - - grantPermission(); - - // Stage model - import UIAbility from '@ohos.app.ability.UIAbility'; - - let context = null; - - class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - let context = this.context; - } - } - - function grantPermission() { - let permissions = ['ohos.permission.DISTRIBUTED_DATASYNC']; - context.requestPermissionsFromUser(permissions).then((data) => { - console.log('success: ${data}'); - }).catch((error) => { - console.error('failed: ${error}'); - }); - } - - grantPermission(); - ``` - -3. Create a **KvManager** instance based on the specified **KvManagerConfig** object. - - 1. Create a **kvManagerConfig** object based on the application context. - 2. Create a **KvManager** instance. - - The sample code is as follows: - - ```js - // Obtain the context of the FA model. - import featureAbility from '@ohos.ability.featureAbility'; - let context = featureAbility.getContext(); - - // Obtain the context of the stage model. - import UIAbility from '@ohos.app.ability.UIAbility'; - let context = null; - class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage){ - context = this.context; - } - } - - let kvManager; - try { - const kvManagerConfig = { - bundleName: 'com.example.datamanagertest', - context:context, - } - kvManager = distributedKVStore.createKVManager(kvManagerConfig); - console.log("Created KVManager successfully"); - } catch (e) { - console.error(`Failed to create KVManager. Code is ${e.code}, message is ${e.message}`); - } - ``` - -4. 
Create and obtain a single KV store. - - 1. Declare the ID of the single KV store to create. - 2. Create a single KV store. You are advised to disable automatic synchronization (`autoSync:false`) and call `sync` when a synchronization is required. - - The sample code is as follows: - - ```js - let kvStore; - try { - const options = { - createIfMissing: true, - encrypt: false, - backup: false, - autoSync: false, - kvStoreType: distributedKVStore.KVStoreType.SINGLE_VERSION, - securityLevel: distributedKVStore.SecurityLevel.S1 - }; - kvManager.getKVStore('storeId', options, function (err, store) { - if (err) { - console.error(`Failed to get KVStore: code is ${err.code}, message is ${err.message}`); - return; - } - console.log('Obtained KVStore successfully'); - kvStore = store; - }); - } catch (e) { - console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`); - } - ``` - - > **NOTE**
- > - > For data synchronization between networked devices, you are advised to open the distributed KV store during application startup to obtain the database handle. With this database handle (`kvStore` in this example), you can perform operations, such as inserting data into the KV store, without creating the KV store repeatedly during the lifecycle of the handle. - -5. Subscribe to changes in the distributed data. - - The following is the sample code for subscribing to the data changes of a single KV store: - - ```js - try{ - kvStore.on('dataChange', distributedKVStore.SubscribeType.SUBSCRIBE_TYPE_ALL, function (data) { - console.log(`dataChange callback call data: ${data}`); - }); - }catch(e){ - console.error(`An unexpected error occured. Code is ${e.code}, message is ${e.message}`); - } - ``` - -6. Write data to the single KV store. - - 1. Construct the `Key` and `Value` to be written into the single KV store. - 2. Write key-value pairs into the single KV store. - - The following is the sample code for writing key-value pairs of the string type into the single KV store: - - ```js - const KEY_TEST_STRING_ELEMENT = 'key_test_string'; - const VALUE_TEST_STRING_ELEMENT = 'value-test-string'; - try { - kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, function (err,data) { - if (err != undefined) { - console.error(`Failed to put data. Code is ${err.code}, message is ${err.message}`); - return; - } - console.log("Put data successfully"); - }); - }catch (e) { - console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`); - } - ``` - -7. Query data in the single KV store. - - 1. Construct the `Key` to be queried from the single KV store. - 2. Query data from the single KV store. 
- - The following is the sample code for querying data of the string type from the single KV store: - - ```js - const KEY_TEST_STRING_ELEMENT = 'key_test_string'; - const VALUE_TEST_STRING_ELEMENT = 'value-test-string'; - try { - kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, function (err,data) { - if (err != undefined) { - console.error(`Failed to put data. Code is ${err.code}, message is ${err.message}`); - return; - } - console.log("Put data successfully"); - kvStore.get(KEY_TEST_STRING_ELEMENT, function (err,data) { - if (err != undefined) { - console.error(`Failed to obtain data. Code is ${err.code}, message is ${err.message}`); - return; - } - console.log(`Obtained data successfully:${data}`); - }); - }); - }catch (e) { - console.error(`Failed to obtain data. Code is ${e.code}, message is ${e.message}`); - } - ``` - -8. Synchronize data to other devices. - - Select the devices in the same network and the synchronization mode to synchronize data. - - > **NOTE**
- > - > The APIs of the `deviceManager` module are system interfaces. - - The following is the example code for synchronizing data in a single KV store: - - ```js - import deviceManager from '@ohos.distributedHardware.deviceManager'; - - let devManager; - // Create deviceManager. - deviceManager.createDeviceManager('bundleName', (err, value) => { - if (!err) { - devManager = value; - // deviceIds is obtained by deviceManager by calling getTrustedDeviceListSync(). - let deviceIds = []; - if (devManager != null) { - var devices = devManager.getTrustedDeviceListSync(); - for (var i = 0; i < devices.length; i++) { - deviceIds[i] = devices[i].deviceId; - } - } - try{ - // 1000 indicates that the maximum delay is 1000 ms. - kvStore.sync(deviceIds, distributedKVStore.SyncMode.PUSH_ONLY, 1000); - } catch (e) { - console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`); - } - } - }); - ``` diff --git a/en/application-dev/database/database-mdds-overview.md b/en/application-dev/database/database-mdds-overview.md deleted file mode 100644 index cfe264a4f7eb06cd51cb834bc3e38ee27e649a14..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-mdds-overview.md +++ /dev/null @@ -1,103 +0,0 @@ -# Distributed Data Service Overview - -The distributed data service (DDS) implements distributed database collaboration across devices for applications. - -Applications save data to distributed databases by calling the DDS APIs. The DDS isolates data of different applications based on a triplet of account, application, and database to ensure secure data access. The DDS synchronizes application data between trusted devices to provide users with consistent data access experience on different devices. - -You do not need to care about the implementation of the database locking mechanism. - - -## Basic Concepts - -### KV Data Model - -The key-value (KV) data model allows data to be organized, indexed, and stored in KV pairs. 
- -The KV data model is suitable for storing service data that is not related. It provides better read and write performance than the SQL database. The KV data model is widely used in distributed scenarios because it handles database version compatibility issues and data synchronization conflicts easily. The distributed database is based on the KV data model and provides KV-based access interfaces. - -### Distributed Database Transaction - -Distributed database transactions include local transactions (same as the transactions of traditional databases) and synchronization transactions. Synchronization transactions allow data to be synchronized between devices by local transaction. Synchronization of a local transaction modification either succeeds or fails on all the devices. - -### Distributed Database Consistency - -In a distributed scenario, cross-device collaboration demands consistent data between the devices in the same network. The data consistency can be classified into the following types: - -- **Strong consistency**: When data is inserted, deleted, or modified on a device, other devices in the same network will obtain the latest data immediately. -- **Weak consistency**: When data is added, deleted, or modified on a device, other devices in the same network may or may not obtain the latest data. The data on these devices may be inconsistent after a certain period of time. -- **Eventual consistency**: When data is added, deleted, or modified on a device, other devices in the same network may not obtain the latest data immediately. However, data on these devices will become consistent after a certain period of time. - -Strong consistency has high requirements on distributed data management and may be used in distributed server deployment. The DDS supports only the eventual consistency because mobile devices are not always online and the network has no center. 
- -### Distributed Database Synchronization - -After discovering and authenticating a device, the underlying communication component notifies the upper-layer application (including the DDS) that the device goes online. The DDS then establishes an encrypted transmission channel to synchronize data between the two devices. - -The DDS provides the following synchronization modes: - -- **Manual synchronization**: Applications call **sync()** to trigger a synchronization. The list of devices to be synchronized and the synchronization mode must be specified. The synchronization mode can be **PULL_ONLY** (pulling remote data to the local end), **PUSH_ONLY** (pushing local data to the remote end), or **PUSH_PULL** (pushing local data to the remote end and pulling remote data to the local end). The internal interface supports condition-based synchronization. The data that meets the conditions can be synchronized to the remote end. -- **Automatic synchronization**: includes full synchronization and condition-based subscription synchronization. In full synchronization, the distributed database automatically pushes local data to the remote end and pulls remote data to the local end when a device goes online or application data is updated. Applications do not need to call **sync()**. The internal interface supports condition-based subscription synchronization. The data that meets the subscription conditions on the remote end is automatically synchronized to the local end. - -### Single KV Store - -Data is saved locally in the unit of a single KV entry. Only one entry is saved for each key. Data can be modified only locally and synchronized to remote devices in sequence based on the update time. - -### Device KV Store - -The device KV store is based on the single KV store. The local device ID is added to the key when KV data is stored in the device KV store. Data can be isolated, managed, and queried by device. 
However, the data synchronized from remote devices cannot be modified locally. - -### Conflict Resolution - -A data conflict occurs when multiple devices modify the same data and commit the modification to the database. The last write wins (LWW) is the default conflict resolution policy used for data conflicts. Based on the commit timestamps, the data with a later timestamp is used. Currently, customized conflict resolution policies are not supported. - -### Schema-based Database Management and Predicate-based Data Query - -A schema is specified when you create or open a single KV store. Based on the schema, the database detects the value format of KV pairs and checks the value structure. Based on the fields in the values, the database implements index creation and predicate-based query. - -### Distributed Database Backup - -The DDS provides the database backup capability. You can set **backup** to **true** to enable daily backup. If a distributed database is damaged, the DDS deletes the database and restores the most recent data from the backup database. If no backup database is available, the DDS creates one. The DDS can also back up encrypted databases. - - -## Working Principles - -The DDS supports distributed management of application database data in the OpenHarmony system. Data can be synchronized between multiple devices with the same account, delivering a consistent user experience across devices. - -The DDS consists of the following: - -- **APIs**
The DDS provides APIs to create databases, access data, and subscribe to data. The APIs support the KV data model and common data types. They are highly compatible and easy to use, and can be released. - -- **Service component**
The service component implements management of metadata, permissions, encryption, backup and restore, and multiple users, and completes initialization of the storage component, synchronization component, and communication adaptation layer of the distributed database. - -- **Storage component**
The storage component implements data access, data reduction, transactions, snapshots, database encryption, data combination, and conflict resolution. - -- **Synchronization component**
The synchronization component interacts with the storage component and the communication adaptation layer to maintain data consistency between online devices. It synchronizes data generated on the local device to other devices and merges data from other devices into the local device. - -- **Communication adaptation layer**
The communication adaptation layer calls APIs of the underlying public communication layer to create and connect to communication channels, receive device online and offline messages, update metadata of the connected and disconnected devices, send device online and offline messages to the synchronization component. The synchronization component updates the list of connected devices, and calls the APIs of the communication adaption layer to encapsulate data and send the data to the connected devices. - -Applications call the DDS APIs to create, access, and subscribe to distributed databases. The APIs store data to the storage component based on the capabilities provided by the service component. The storage component interacts with the synchronization component to synchronize data. The synchronization component uses the communication adaptation layer to synchronize data to remote devices, which update the data in the storage component and provide the data for applications through service APIs. - - -**Figure 1** How DDS works - - -![](figures/en-us_image_0000001183386164.png) - - -## Constraints - -- The DDS supports the KV data model only. It does not support foreign keys or triggers of the relational database. - -- The KV data model specifications supported by the DDS are as follows: - - - For each record in a device KV store, the key must be less than or equal to 896 bytes and the value be less than 4 MB. - - For each record in a single KV store, the key must be less than or equal to 1 KB and the value be less than 4 MB. - - An application can open a maximum of 16 KV stores simultaneously. - -- The data that needs to be synchronized between devices should be stored in distributed databases rather than local databases. - -- The DDS does not support customized conflict resolution policies. - -- The maximum number of access requests to the KvStore API is 1000 per second and 10000 per minute. 
The maximum number of access requests to the KvManager API is 50 per second and 500 per minute. - -- Blocking operations, such as modifying UI components, are not allowed in the distributed database event callback. diff --git a/en/application-dev/database/database-preference-guidelines.md b/en/application-dev/database/database-preference-guidelines.md deleted file mode 100644 index 724e273675061c4b6969fb3fcd6f6cbdd984a15f..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-preference-guidelines.md +++ /dev/null @@ -1,203 +0,0 @@ -# Preferences Development - -> **NOTE** -> -> This feature is supported since API version 9. For the versions earlier than API version 9, use [Lightweight Storage](../reference/apis/js-apis-data-storage.md) APIs. - -## When to Use - -Preferences are used for storing the data that is frequently used by applications, but not for storing a large amount of data or data frequently changed. The application data is persistently stored on a device in the form of files. - -Note that the instance accessed by an application contains all data of the file. The data is always loaded to the memory of the device until the application removes it from the memory. The application can call the **Preferences** APIs to manage data. - -## Available APIs - -The **Preferences** module provides APIs for processing data in the form of key-value (KV) pairs and supports persistence of the KV pairs when required. - -The key is of the string type, and the value can be a number, a string, a Boolean value, or an array of numbers, strings, or Boolean values. - -For details about **Preferences** APIs, see [Preferences](../reference/apis/js-apis-data-preferences.md). - -### Obtaining a **Preferences** Instance - -Obtain a **Preferences** instance for data operations. A **Preferences** instance is obtained after data is read from a specified file and loaded to the instance. 
- -**Table 1** API for obtaining a **Preferences** instance - -| Bundle Name | API | Description | -| --------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| ohos.data.preferences | getPreferences(context: Context, name: string): Promise\ | Obtains a **Preferences** instance.| - -### Processing Data - -Call **put()** to add or modify data in a **Preferences** instance. - -Call **get()** to read data from a **Preferences** instance. - -Call **getAll()** to obtain an **Object** instance that contains all KV pairs in a **Preferences** instance. - -Call **delete()** to delete the KV pair of the specified key from the **Preferences** instance. - -**Table 2** APIs for processing **Preferences** data - -| Class | API | Description | -| ----------- | ---------------------------------------------------------- | ------------------------------------------------------------ | -| Preferences | put(key: string, value: ValueType): Promise\ | Writes data to the **Preferences** instance. The value to write can be a number, a string, a Boolean value, or an array of numbers, strings, or Boolean values.| -| Preferences | get(key: string, defValue: ValueType): Promise\ | Obtains data from the **Preferences** instance. The value to read can be a number, a string, a Boolean value, or an array of numbers, strings, or Boolean values.| -| Preferences | getAll(): Promise\ | Obtains an **Object** instance that contains all KV pairs in the **Preferences** instance. | -| Preferences | delete(key: string): Promise\ | Deletes the KV pair of the specified key from the **Preferences** instance. | - - -### Storing Data Persistently - -Call **flush()** to write the cached data back to its text file for persistent storage. 
- -**Table 4** API for data persistence - -| Class | API | Description | -| ----------- | ----------------------- | ------------------------------------------- | -| Preferences | flush(): Promise\ | Flushes data from the **Preferences** instance to its file through an asynchronous thread.| - -### Observing Data Changes - -You can subscribe to data changes. When the value of the subscribed key is changed and saved by **flush()**, a callback will be invoked to return the new data. - -**Table 5** APIs for observing **Preferences** changes - -| Class | API | Description | -| ----------- | ------------------------------------------------------------ | -------------- | -| Preferences | on(type: 'change', callback: Callback<{ key : string }>): void | Subscribes to data changes.| -| Preferences | off(type: 'change', callback: Callback<{ key : string }>): void | Unsubscribes from data changes. | - -### Deleting Data - -You can use the following APIs to delete a **Preferences** instance or data file. - -**Table 6** APIs for deleting **Preferences** - -| Bundle Name | API | Description | -| --------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| ohos.data.preferences | deletePreferences(context: Context, name: string): Promise\ | Deletes a **Preferences** instance from the memory and its files from the device.| -| ohos.data.preferences | removePreferencesFromCache(context: Context, name: string): Promise\ | Removes a **Preferences** instance from the memory to release memory. | - -## How to Develop - -1. Import @ohos.data.preferences and related modules to the development environment. - - ```js - import data_preferences from '@ohos.data.preferences'; - ``` - -2. Obtain a **Preferences** instance. - - Read the specified file and load its data to the **Preferences** instance for data operations. - - FA model: - - ```js - // Obtain the context. 
- import featureAbility from '@ohos.ability.featureAbility' - let context = featureAbility.getContext(); - - let preferences = null; - let promise = data_preferences.getPreferences(context, 'mystore'); - - promise.then((pref) => { - preferences = pref; - }).catch((err) => { - console.info("Failed to get the preferences."); - }) - ``` - - Stage model: - - ```ts - // Obtain the context. - import UIAbility from '@ohos.app.ability.UIAbility'; - let preferences = null; - export default class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - let promise = data_preferences.getPreferences(this.context, 'mystore'); - promise.then((pref) => { - preferences = pref; - }).catch((err) => { - console.info("Failed to get the preferences."); - }) - } - } - - - ``` - -3. Write data. - - Use **preferences.put()** to write data to the **Preferences** instance. - - ```js - let putPromise = preferences.put('startup', 'auto'); - putPromise.then(() => { - console.info("Put the value of 'startup' successfully."); - }).catch((err) => { - console.info("Failed to put the value of 'startup'. Cause: " + err); - }) - ``` - -4. Read data. - - Use **preferences.get()** to read data. - - ```js - let getPromise = preferences.get('startup', 'default'); - getPromise.then((value) => { - console.info("The value of 'startup' is " + value); - }).catch((err) => { - console.info("Failed to get the value of 'startup'. Cause: " + err); - }) - ``` - -5. Store data persistently. - - Use **preferences.flush()** to flush data from the **Preferences** instance to its file. - - ```js - preferences.flush(); - ``` - -6. Observe data changes. - - Specify an observer as the callback to subscribe to data changes for an application. When the value of the subscribed key is changed and saved by **flush()**, the observer callback will be invoked to return the new data. 
- - ```js - let observer = function (key) { - console.info("The key" + key + " changed."); - } - preferences.on('change', observer); - // The data is changed from 'auto' to 'manual'. - preferences.put('startup', 'manual', function (err) { - if (err) { - console.info("Failed to put the value of 'startup'. Cause: " + err); - return; - } - console.info("Put the value of 'startup' successfully."); - preferences.flush(function (err) { - if (err) { - console.info("Failed to flush data. Cause: " + err); - return; - } - console.info("Flushed data successfully."); // The observer will be called. - }) - }) - ``` - -7. Delete the specified file. - - Use **deletePreferences()** to delete the **Preferences** instance and its persistent file and backup and corrupted files. After the specified files are deleted, the application cannot use that instance to perform any data operation. Otherwise, data inconsistency will be caused. The deleted data and files cannot be restored. - - ```js - let proDelete = data_preferences.deletePreferences(context, 'mystore'); - proDelete.then(() => { - console.info("Deleted data successfully."); - }).catch((err) => { - console.info("Failed to delete data. Cause: " + err); - }) - ``` diff --git a/en/application-dev/database/database-preference-overview.md b/en/application-dev/database/database-preference-overview.md deleted file mode 100644 index 4987b6e8494c738ef29e2f362b2acda43528bbb6..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-preference-overview.md +++ /dev/null @@ -1,34 +0,0 @@ -# Preferences Overview - -Preferences are used to implement quick access and persistence of the data in the `key-value` structure. - -After an application obtains a **Preferences** instance, the data in the instance will be cached in the memory for faster access. - -The cached data can also be written to a text file for persistent storage. 
Since file read and write consume system resources, you are advised to minimize the frequency of reading and writing files. - -You do not need to care about the implementation of the database locking mechanism. - -## Basic Concepts - -- **Key-value structure** - - A type of data structure. The `Key` is the unique identifier for a piece of data, and the `Value` is the specific data being identified. - -- **Non-relational database** - - A database not in compliance with the atomicity, consistency, isolation, and durability (ACID) properties of relational data transactions. The data in a non-relational database is independent. The database that organizes data in the `key-value` structure is a non-relational database. - -## Working Principles - -1. An application can load data from a **Preferences** persistent file to a **Preferences** instance. The system stores the **Preferences** instance in the memory through a static container. Each file of an application or process has only one **Preferences** instance in the memory, till the application removes the instance from the memory or deletes the **Preferences** persistent file. -2. When obtaining a **Preferences** instance, the application can read data from or write data to the instance. The data in the `Preferences` instance can be flushed to its **Preferences** persistent file by calling the **flush()** method. - -**Figure 1** Working mechanism - -![](figures/preferences.png) - -## Constraints - -- **Preferences** instances are loaded to the memory. To minimize non-memory overhead, the number of data records stored in a **Preferences** instance cannot exceed 10,000. Delete the instances that are no longer used in a timely manner. -- The `Key` in key-value pairs is of the string type. It cannot be empty or exceed 80 bytes. -- The `Value` of the string type in key-value pairs can be empty, but cannot exceed 8192 bytes if not empty. 
diff --git a/en/application-dev/database/database-relational-guidelines.md b/en/application-dev/database/database-relational-guidelines.md deleted file mode 100644 index 728e66f064bf79635a0ca18640d00c6713c7edc8..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-relational-guidelines.md +++ /dev/null @@ -1,496 +0,0 @@ -# RDB Development - -## When to Use - -A relational database (RDB) store allows you to operate local data with or without native SQL statements based on SQLite. - - -## Available APIs - -Most of the RDB store APIs are asynchronous interfaces, which can use a callback or promise to return the result. This document uses the promise-based APIs as an example. For more information about the APIs, see [RDB Store](../reference/apis/js-apis-data-relationalStore.md). - -### Creating or Deleting an RDB Store - -The following table describes the APIs for creating and deleting an RDB store. - -**Table 1** APIs for creating and deleting an RDB store - -| API | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | -| getRdbStore(context: Context, config: StoreConfig): Promise<RdbStore> | Obtains an **RdbStore** object. This API uses a promise to return the result. You can set parameters for the **RdbStore** object based on service requirements and use **RdbStore** APIs to perform data operations.
- **context**: application context.
- **config**: configuration of the RDB store.| -| deleteRdbStore(context: Context, name: string): Promise<void> | Deletes an RDB store. This API uses a promise to return the result.
- **context**: application context.
- **name**: name of the RDB store to delete.| - -### Managing Data in an RDB Store - -The RDB provides APIs for inserting, deleting, updating, and querying data in the local RDB store. - -- **Inserting Data** - - The RDB provides APIs for inserting data through a **ValuesBucket** in a data table. If the data is inserted, the row ID of the data inserted will be returned; otherwise, **-1** will be returned. - - **Table 2** API for inserting data - - - | Class | API | Description | - | ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | RdbStore | insert(table: string, values: ValuesBucket): Promise<number> | Inserts a row of data into a table. This API uses a promise to return the result.
If the operation is successful, the row ID will be returned; otherwise, **-1** will be returned.
- **table**: name of the target table.
- **values**: data to be inserted into the table.| - -- **Updating Data** - - Call **update()** to pass the new data and specify the update conditions by using **RdbPredicates**. If the data is updated, the number of rows of the updated data will be returned; otherwise, **0** will be returned. - - **Table 3** API for updating data - - - | Class | API | Description | - | ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | RdbStore | update(values: ValuesBucket, predicates: RdbPredicates): Promise<number> | Updates data based on the specified **RdbPredicates** object. This API uses a promise to return the number of rows updated.
- **values**: data to update, which is stored in **ValuesBucket**.
- **predicates**: conditions for updating data.| - -- **Deleting Data** - - Call **delete()** to delete the data that meets the conditions specified by **RdbPredicates**. If the data is deleted, the number of rows of the deleted data will be returned; otherwise, **0** will be returned. - - **Table 4** API for deleting data - - - | Class | API | Description | - | ---------- | ---------------------------------------------------------- | ------------------------------------------------------------ | - | RdbStore | delete(predicates: RdbPredicates): Promise<number> | Deletes data from the RDB store based on the specified **RdbPredicates** object. This API uses a promise to return the number of rows deleted.
- **predicates**: conditions for deleting data.| - -- **Querying Data** - - You can query data in an RDB store in either of the following ways: - - - Call the **query()** method to query data based on the predicates, without passing any SQL statement. - - Run the native SQL statement. - - **Table 5** APIs for querying data - - | Class | API | Description | - | ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | RdbStore | query(predicates: RdbPredicates, columns?: Array<string>): Promise<ResultSet> | Queries data from the RDB store based on specified conditions. This API uses a promise to return the result.
- **predicates**: conditions for querying data.
- **columns**: columns to query. If this parameter is not specified, the query applies to all columns.| - | RdbStore | querySql(sql: string, bindArgs?: Array<ValueType>): Promise<ResultSet> | Queries data using the specified SQL statement. This API uses a promise to return the result.
- **sql**: SQL statement.
- **bindArgs**: arguments in the SQL statement.| - | RdbStore | remoteQuery(device: string, table: string, predicates: RdbPredicates, columns: Array<string>): Promise<ResultSet> | Queries data from the database of a remote device based on specified conditions. This API uses a promise to return the result.
- **device**: network ID of the remote device.
- **table**: name of the table to be queried.
- **predicates**: **RdbPredicates** that specifies the query condition.
- **columns**: columns to query. If this parameter is not specified, the query applies to all columns.| - -### Using Predicates - -The **RDB** module provides **RdbPredicates** for you to set database operation conditions. - -The following table lists common predicates. For more information about predicates, see [**RdbPredicates**](../reference/apis/js-apis-data-relationalStore.md#rdbpredicates). - -**Table 6** APIs for using RDB store predicates - -| Class | API | Description | -| --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbPredicates | equalTo(field: string, value: ValueType): RdbPredicates | Sets an **RdbPredicates** to search for the data that is equal to the specified value.
- **field**: column name in the database table.
- **value**: value to match the **RdbPredicates**.
- **RdbPredicates**: **RdbPredicates** object created.| -| RdbPredicates | notEqualTo(field: string, value: ValueType): RdbPredicates | Sets an **RdbPredicates** to search for the data that is not equal to the specified value.
- **field**: column name in the database table.
- **value**: value to match the **RdbPredicates**.
- **RdbPredicates**: **RdbPredicates** object created.| -| RdbPredicates | or(): RdbPredicates | Adds the OR condition to the **RdbPredicates**.
- **RdbPredicates**: **RdbPredicates** with the OR condition.| -| RdbPredicates | and(): RdbPredicates | Adds the AND condition to the **RdbPredicates**.
- **RdbPredicates**: **RdbPredicates** with the AND condition.| -| RdbPredicates | contains(field: string, value: string): RdbPredicates | Sets an **RdbPredicates** to search for the data that contains the specified value.
- **field**: column name in the database table.
- **value**: value to match the **RdbPredicates**.
- **RdbPredicates**: **RdbPredicates** object created.| - - -### Using the Result Set - -You can use the APIs provided by **ResultSet** to traverse and access the data you have queried. A result set can be regarded as a row of data in the queried result. - -For details about how to use **ResultSet** APIs, see [ResultSet](../reference/apis/js-apis-data-relationalStore.md#resultset). - -> **NOTICE**
-> After a result set is used, you must call the **close()** method to close it explicitly. - -**Table 7** APIs for using the result set - -| Class | API | Description | -| ----------- | ---------------------------------------- | ------------------------------------------ | -| ResultSet | goToFirstRow(): boolean | Moves to the first row of the result set. | -| ResultSet | getString(columnIndex: number): string | Obtains the value in the form of a string based on the specified column and current row. | -| ResultSet | getBlob(columnIndex: number): Uint8Array | Obtains the value in the form of a byte array based on the specified column and the current row.| -| ResultSet | getDouble(columnIndex: number): number | Obtains the value in the form of double based on the specified column and current row. | -| ResultSet | getLong(columnIndex: number): number | Obtains the value in the form of a long integer based on the specified column and current row. | -| ResultSet | close(): void | Closes the result set. | - - - -### Setting Distributed Tables - -> **NOTE** -> -> - The **ohos.permission.DISTRIBUTED_DATASYNC** permission is required for calling the **setDistributedTables**, **obtainDistributedTableName**, **sync**, **on** and **off** APIs of **RdbStore**. -> - The devices must be connected over network before the distributed tables are used. For details about the APIs and usage, see [Device Management](../reference/apis/js-apis-device-manager.md). - -**Setting Distributed Tables** - -**Table 8** API for setting distributed tables - -| Class | API | Description | -| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbStore | setDistributedTables(tables: Array\): Promise\ | Sets distributed tables. This API uses a promise to return the result.
- **tables**: names of the distributed tables to set.| - -**Obtaining the Distributed Table Name for a Remote Device** - -You can obtain the distributed table name for a remote device based on the local table name. The distributed table name can be used to query the RDB store of the remote device. - -**Table 9** API for obtaining the distributed table name of a remote device - -| Class | API | Description | -| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbStore | obtainDistributedTableName(device: string, table: string): Promise\ | Obtains the distributed table name for a remote device based on the local table name. The distributed table name is required when the RDB store of a remote device is queried. This API uses a promise to return the result.
- **device**: remote device.
- **table**: local table name.| - -**Synchronizing Data Between Devices** - -**Table 10** API for synchronizing data between devices - -| Class | API | Description | -| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbStore | sync(mode: SyncMode, predicates: RdbPredicates): Promise\> | Synchronizes data between devices. This API uses a promise to return the result.
- **mode**: synchronization mode. **SYNC_MODE_PUSH** means to push data from the local device to a remote device. **SYNC_MODE_PULL** means to pull data from a remote device to the local device.
- **predicates**: specifies the data and devices to synchronize.
- **string**: device ID.
- **number**: synchronization status of each device. The value **0** indicates a successful synchronization. Other values indicate a synchronization failure.| - -**Registering an RDB Store Observer** - -**Table 11** API for registering an observer - -| Class | API | Description | -| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbStore | on(event: 'dataChange', type: SubscribeType, observer: Callback\>): void | Registers an observer for this RDB store to subscribe to distributed data changes. When data in the RDB store changes, a callback will be invoked to return the data changes.
- **type**: subscription type. **SUBSCRIBE_TYPE_REMOTE**: subscribes to remote data changes.
- **observer**: observer that listens for data changes in the RDB store.| - -**Unregistering an RDB Store Observer** - -**Table 12** API for unregistering an observer - -| Class | API | Description | -| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| RdbStore | off(event:'dataChange', type: SubscribeType, observer: Callback\>): void; | Unregisters the observer of the specified type from the RDB store. This API uses an asynchronous callback to return the result.
- **type**: subscription type. **SUBSCRIBE_TYPE_REMOTE**: subscribes to remote data changes.
- **observer**: observer to unregister.| - -### Backing Up and Restoring an RDB Store - -**Backing Up an RDB Store** - -**Table 13** API for backing up an RDB store - -| Class | API | Description | -| ---------- | --------------------------------------------- | ------------------------------------------------------------ | -| RdbStore | backup(destName: string): Promise<void> | Backs up an RDB store. This API uses a promise to return the result.
- **destName**: name of the RDB backup file.| - -**Restoring an RDB Store** - -**Table 14** API for restoring an RDB store - -| Class | API | Description | -| ---------- | --------------------------------------------- | ------------------------------------------------------------ | -| RdbStore | restore(srcName: string): Promise<void> | Restores an RDB store from a backup file. This API uses a promise to return the result.
- **srcName**: name of the backup file used to restore the RDB store.| - -### Transaction - -**Table 15** Transaction APIs - -| Class | API | Description | -| -------- | ----------------------- | --------------------------------- | -| RdbStore | beginTransaction(): void | Starts the transaction before executing SQL statements.| -| RdbStore | commit(): void | Commits the executed SQL statements. | -| RdbStore | rollBack(): void | Rolls back the SQL statements that have been executed. | - -## How to Develop - -1. Create an RDB store. - - (1) Configure the RDB store attributes, including the RDB store name, storage mode, and whether read-only mode is used. - - (2) Initialize the table structure and related data in the RDB store. - - (3) Create an RDB store. - - FA model: - - ```js - import relationalStore from '@ohos.data.relationalStore' - import featureAbility from '@ohos.ability.featureAbility' - - var store; - - // Obtain the context. - let context = featureAbility.getContext(); - - const STORE_CONFIG = { - name: "RdbTest.db", - securityLevel: relationalStore.SecurityLevel.S1 - }; - - // Assume that the current RDB store version is 3. - relationalStore.getRdbStore(context, STORE_CONFIG, function (err, rdbStore) { - store = rdbStore; - // When an RDB store is created, the default version is 0. - if (store.version == 0) { - store.executeSql("CREATE TABLE IF NOT EXISTS student (id INTEGER PRIMARY KEY AUTOINCREMENT, score REAL);", null); - // Set the RDB store version. The input parameter must be an integer greater than 0. - store.version = 3; - } - - // When an app is updated to the current version, the RDB store needs to be updated from version 1 to version 2. 
- if (store.version != 3 && store.version == 1) { - // version = 1: table structure: student (id, age) => version = 2: table structure: student (id, age, score) - store.executeSql("ALTER TABLE student ADD COLUMN score REAL", null); - store.version = 2; - } - - // When an app is updated to the current version, the RDB store needs to be updated from version 2 to version 3. - if (store.version != 3 && store.version == 2) { - // version = 2: table structure: student (id, age, score) => version = 3: table structure: student (id, score) - store.executeSql("ALTER TABLE student DROP COLUMN age INTEGER", null); - store.version = 3; - } - }) - ``` - Stage model: - ```ts - import relationalStore from '@ohos.data.relationalStore' - import UIAbility from '@ohos.app.ability.UIAbility' - - class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - var store; - const STORE_CONFIG = { - name: "RdbTest.db", - securityLevel: relationalStore.SecurityLevel.S1 - }; - - // Assume that the current RDB store version is 3. - relationalStore.getRdbStore(this.context, STORE_CONFIG, function (err, rdbStore) { - store = rdbStore; - // When an RDB store is created, the default version is 0. - if (store.version == 0) { - store.executeSql("CREATE TABLE IF NOT EXISTS student (id INTEGER PRIMARY KEY AUTOINCREMENT, score REAL);", null); - // Set the RDB store version. The input parameter must be an integer greater than 0. - store.version = 3; - } - - // When an app is updated to the current version, the RDB store needs to be updated from version 1 to version 2. - if (store.version != 3 && store.version == 1) { - // version = 1: table structure: student (id, age) => version = 2: table structure: student (id, age, score) - store.executeSql("ALTER TABLE student ADD COLUMN score REAL", null); - store.version = 2; - } - - // When an app is updated to the current version, the RDB store needs to be updated from version 2 to version 3. 
- if (store.version != 3 && store.version == 2) { - // version = 2: table structure: student (id, age, score) => version = 3: table structure: student (id, score) - store.executeSql("ALTER TABLE student DROP COLUMN age INTEGER", null); - store.version = 3; - } - }) - } - } - ``` - -2. Insert data. - - (1) Create a **ValuesBucket** instance to store the data you need to insert. - - (2) Call the **insert()** method to insert data into the RDB store. - - The sample code is as follows: - - ```js - let u8 = new Uint8Array([1, 2, 3]); - const valueBucket = { "name": "Tom", "age": 18, "salary": 100.5, "blobType": u8 }; - let insertPromise = store.insert("test", valueBucket); - ``` - - ```js - // Use a transaction to insert data. - try { - store.beginTransaction(); - let u8 = new Uint8Array([1, 2, 3]); - const valueBucket = { "name": "Tom", "age": 18, "salary": 100.5, "blobType": u8 }; - let promise = store.insert("test", valueBucket); - promise.then(() => { - store.commit(); - }) - } catch (err) { - console.error(`Transaction failed, err: ${err}`); - store.rollBack(); - } - ``` - -3. Query data. - - (1) Create an **RdbPredicates** object to specify query conditions. - - (2) Call the **query()** API to query data. - - (3) Call the **resultSet()** API to obtain the result. - - The sample code is as follows: - - ```js - let predicates = new relationalStore.RdbPredicates("test"); - predicates.equalTo("name", "Tom"); - let promisequery = store.query(predicates); - promisequery.then((resultSet) => { - resultSet.goToFirstRow(); - const id = resultSet.getLong(resultSet.getColumnIndex("id")); - const name = resultSet.getString(resultSet.getColumnIndex("name")); - const age = resultSet.getLong(resultSet.getColumnIndex("age")); - const salary = resultSet.getDouble(resultSet.getColumnIndex("salary")); - const blobType = resultSet.getBlob(resultSet.getColumnIndex("blobType")); - resultSet.close(); - }) - ``` - -4. Set the distributed tables to be synchronized. 
- - (1) Add the following permission to the permission configuration file: - - ```json - "requestPermissions": - { - "name": "ohos.permission.DISTRIBUTED_DATASYNC" - } - ``` - - (2) Obtain the required permissions. - - (3) Set the distributed tables. - - (4) Check whether the setting is successful. - - The sample code is as follows: - - ```js - let context = featureAbility.getContext(); - context.requestPermissionsFromUser(['ohos.permission.DISTRIBUTED_DATASYNC'], 666, function (result) { - console.info(`result.requestCode=${result.requestCode}`); - }) - let promise = store.setDistributedTables(["test"]); - promise.then(() => { - console.info(`setDistributedTables success.`); - }).catch((err) => { - console.error(`setDistributedTables failed, ${err}`); - }) - ``` - -5. Synchronize data across devices. - - (1) Construct an **RdbPredicates** object to specify remote devices within the network to be synchronized. - - (2) Call **rdbStore.sync()** to synchronize data. - - (3) Check whether the data synchronization is successful. - - The sample code is as follows: - - ```js - let predicate = new relationalStore.RdbPredicates('test'); - predicate.inDevices(['12345678abcde']); - let promise = store.sync(relationalStore.SyncMode.SYNC_MODE_PUSH, predicate); - promise.then((result) => { - console.info(`sync done.`); - for (let i = 0; i < result.length; i++) { - console.info(`device=${result[i][0]}, status=${result[i][1]}`); - } - }).catch((err) => { - console.error(`sync failed, err: ${err}`); - }) - ``` - -6. Subscribe to distributed data. - - (1) Register an observer to listen for distributed data changes. - - (2) When data in the RDB store changes, a callback will be invoked to return the data changes. 
- - The sample code is as follows: - - ```js - function storeObserver(devices) { - for (let i = 0; i < devices.length; i++) { - console.info(`device= ${devices[i]} data changed`); - } - } - - try { - store.on('dataChange', relationalStore.SubscribeType.SUBSCRIBE_TYPE_REMOTE, storeObserver); - } catch (err) { - console.error(`register observer failed, err: ${err}`); - } - ``` - -7. Query data across devices. - - (1) Obtain the distributed table name for a remote device based on the local table name. - - (2) Call the resultSet() API to obtain the result. - - The sample code is as follows: - - ```js - import deviceManager from '@ohos.distributedHardware.deviceManager' - - let deviceIds = []; - deviceManager.createDeviceManager('bundleName', (err, value) => { - if (!err) { - let devManager = value; - if (devManager != null) { - // Obtain device IDs. - let devices = devManager.getTrustedDeviceListSync(); - for (let i = 0; i < devices.length; i++) { - deviceIds[i] = devices[i].deviceId; - } - } - } - }) - - let tableName = store.obtainDistributedTableName(deviceIds[0], "test"); - let resultSet = store.querySql("SELECT * FROM " + tableName); - ``` - -8. Query data of a remote device. - - (1) Construct a predicate object for querying distributed tables, and specify the remote distributed table name and the remote device. - - (2) Call the resultSet() API to obtain the result. - - The sample code is as follows: - - ```js - let rdbPredicate = new relationalStore.RdbPredicates('employee'); - predicates.greaterThan("id", 0) ; - let promiseQuery = store.remoteQuery('12345678abcde', 'employee', rdbPredicate); - promiseQuery.then((resultSet) => { - while (resultSet.goToNextRow()) { - let idx = resultSet.getLong(0); - let name = resultSet.getString(1); - let age = resultSet.getLong(2); - console.info(`indx: ${idx}, name: ${name}, age: ${age}`); - } - resultSet.close(); - }).catch((err) => { - console.error(`failed to remoteQuery, err: ${err}`); - }) - ``` - -9. 
Back up and restore an RDB store. - - (1) Back up the current RDB store. - - The sample code is as follows: - - ```js - let promiseBackup = store.backup("dbBackup.db"); - promiseBackup.then(() => { - console.info(`Backup success.`); - }).catch((err) => { - console.error(`Backup failed, err: ${err}`); - }) - ``` - - (2) Restore the RDB store using the backup file. - - The sample code is as follows: - - ```js - let promiseRestore = store.restore("dbBackup.db"); - promiseRestore.then(() => { - console.info(`Restore success.`); - }).catch((err) => { - console.error(`Restore failed, err: ${err}`); - }) - ``` diff --git a/en/application-dev/database/database-relational-overview.md b/en/application-dev/database/database-relational-overview.md deleted file mode 100644 index b02f2d646b9dc8e89b1b8a32f2743c8656c6d1f9..0000000000000000000000000000000000000000 --- a/en/application-dev/database/database-relational-overview.md +++ /dev/null @@ -1,43 +0,0 @@ -# RDB Overview - -A relational database (RDB) store manages data based on relational models. With the underlying SQLite database, the RDB store provides a complete mechanism for managing data as in a local database. To satisfy different needs in complicated scenarios, the RDB store offers APIs for performing operations, such as adding, deleting, modifying, and querying data, and supports direct execution of SQL statements. After an application is uninstalled, the related RDB store will be automatically deleted. - -You do not need to care about the implementation of the database locking mechanism. - -## Basic Concepts - -- **RDB store** - - A type of database created on the basis of relational models. A RDB store holds data in rows and columns. - -- **Predicate** - - A representation of the property or feature of a data entity, or the relationship between data entities. Predicates are used to define operation conditions. - -- **Result set** - - A set of query results used to access data. 
You can access the required data in a result set in flexible modes. - -- **SQLite database** - - A lightweight open-source relational database management system that complies with Atomicity, Consistency, Isolation, and Durability (ACID). - -## Working Principles - -The RDB store provides common operation APIs for external systems. It uses the SQLite as the underlying persistent storage engine, which supports all SQLite database features. - -**Figure 1** Working mechanism - -![how-rdb-works](figures/how-rdb-works.png) - -## Default Settings - -- The default RDB logging mode is Write Ahead Log (WAL). -- The default data flushing mode is **FULL** mode. -- The default size of the shared memory used by an OpenHarmony database is 2 MB. - -## Constraints - -- An RDB store can be connected to a maximum of four connection pools to manage read and write operations. - -- To ensure data accuracy, the RDB store supports only one write operation at a time. diff --git a/en/application-dev/database/figures/dataManagement.jpg b/en/application-dev/database/figures/dataManagement.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a43ca576222ad1da550242ed34c5f82700d52392 Binary files /dev/null and b/en/application-dev/database/figures/dataManagement.jpg differ diff --git a/en/application-dev/database/figures/dataShare.jpg b/en/application-dev/database/figures/dataShare.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd6b1d08eeedd0cc50c6c9813b064ca4504ffa98 Binary files /dev/null and b/en/application-dev/database/figures/dataShare.jpg differ diff --git a/en/application-dev/database/figures/deviceKVStore.jpg b/en/application-dev/database/figures/deviceKVStore.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed4b8aadd266d8531a24cb9ef6b72bfd33258759 Binary files /dev/null and b/en/application-dev/database/figures/deviceKVStore.jpg differ diff --git a/en/application-dev/database/figures/distributedObject.jpg 
b/en/application-dev/database/figures/distributedObject.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a877e89bc5714fe730e05130d1dd0a89b135151d Binary files /dev/null and b/en/application-dev/database/figures/distributedObject.jpg differ diff --git a/en/application-dev/database/figures/distributedObject_sync.jpg b/en/application-dev/database/figures/distributedObject_sync.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f85325f29cf85692711a89367ddb3dd70d3b9b5a Binary files /dev/null and b/en/application-dev/database/figures/distributedObject_sync.jpg differ diff --git a/en/application-dev/database/figures/distributedObject_syncView.jpg b/en/application-dev/database/figures/distributedObject_syncView.jpg new file mode 100644 index 0000000000000000000000000000000000000000..751aaab344c417316b5054ef9ff1d4c14d1b1288 Binary files /dev/null and b/en/application-dev/database/figures/distributedObject_syncView.jpg differ diff --git a/en/application-dev/database/figures/en-us_image_0000001183386164.png b/en/application-dev/database/figures/en-us_image_0000001183386164.png deleted file mode 100644 index e1fb8747d48c315d0e88504135c0bd388cc81077..0000000000000000000000000000000000000000 Binary files a/en/application-dev/database/figures/en-us_image_0000001183386164.png and /dev/null differ diff --git a/en/application-dev/database/figures/en-us_image_0000001542496993.png b/en/application-dev/database/figures/en-us_image_0000001542496993.png new file mode 100644 index 0000000000000000000000000000000000000000..de0fa2afbd5c9301700618436126ca60e2666a06 Binary files /dev/null and b/en/application-dev/database/figures/en-us_image_0000001542496993.png differ diff --git a/en/application-dev/database/figures/en_DataShare.png b/en/application-dev/database/figures/en_DataShare.png deleted file mode 100644 index b56e96d3bb3ff338f082efd75959d9b15cea6a86..0000000000000000000000000000000000000000 Binary files 
a/en/application-dev/database/figures/en_DataShare.png and /dev/null differ diff --git a/en/application-dev/database/figures/how-distributedobject-works.png b/en/application-dev/database/figures/how-distributedobject-works.png deleted file mode 100644 index 33785a3fd4c66624b298b1aa36959dbf635d2343..0000000000000000000000000000000000000000 Binary files a/en/application-dev/database/figures/how-distributedobject-works.png and /dev/null differ diff --git a/en/application-dev/database/figures/how-rdb-works.png b/en/application-dev/database/figures/how-rdb-works.png deleted file mode 100644 index 35e10dc3a07aef0e85a8f02332c3c6cd9d605448..0000000000000000000000000000000000000000 Binary files a/en/application-dev/database/figures/how-rdb-works.png and /dev/null differ diff --git a/en/application-dev/database/figures/kvStore.jpg b/en/application-dev/database/figures/kvStore.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98f3274ee425215f7027a835fbb1f98afdbd4f16 Binary files /dev/null and b/en/application-dev/database/figures/kvStore.jpg differ diff --git a/en/application-dev/database/figures/kvStore_development_process.png b/en/application-dev/database/figures/kvStore_development_process.png new file mode 100644 index 0000000000000000000000000000000000000000..e5babf2b70994940afbb6e205c05ae96128a82d7 Binary files /dev/null and b/en/application-dev/database/figures/kvStore_development_process.png differ diff --git a/en/application-dev/database/figures/preferences.jpg b/en/application-dev/database/figures/preferences.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fea03ea640a6a88184efbc2a94de6f1d0a8da918 Binary files /dev/null and b/en/application-dev/database/figures/preferences.jpg differ diff --git a/en/application-dev/database/figures/preferences.png b/en/application-dev/database/figures/preferences.png deleted file mode 100644 index be3c6feef3c1f8d9da83e5d3c0065655bd07cb57..0000000000000000000000000000000000000000 Binary 
files a/en/application-dev/database/figures/preferences.png and /dev/null differ diff --git a/en/application-dev/database/figures/relationStore_local.jpg b/en/application-dev/database/figures/relationStore_local.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d09a410c6e4036b637ec97861106e8c178969ad6 Binary files /dev/null and b/en/application-dev/database/figures/relationStore_local.jpg differ diff --git a/en/application-dev/database/figures/relationalStore_sync.jpg b/en/application-dev/database/figures/relationalStore_sync.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cba72cb076d58b2422c2939d883ca3b611e570bf Binary files /dev/null and b/en/application-dev/database/figures/relationalStore_sync.jpg differ diff --git a/en/application-dev/database/figures/silent_dataShare.jpg b/en/application-dev/database/figures/silent_dataShare.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6b1fce306ed8824acc343adf4359f29f4fa31b4 Binary files /dev/null and b/en/application-dev/database/figures/silent_dataShare.jpg differ diff --git a/en/application-dev/database/figures/singleKVStore.jpg b/en/application-dev/database/figures/singleKVStore.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0f95d076872c9607238503407cb77c535392747 Binary files /dev/null and b/en/application-dev/database/figures/singleKVStore.jpg differ diff --git a/en/application-dev/database/share-data-by-datashareextensionability.md b/en/application-dev/database/share-data-by-datashareextensionability.md new file mode 100644 index 0000000000000000000000000000000000000000..7f70ab30d4c04c421c1e18032a0da13e590f80a7 --- /dev/null +++ b/en/application-dev/database/share-data-by-datashareextensionability.md @@ -0,0 +1,241 @@ +# Sharing Data Using DataShareExtensionAbility + + +## When to Use + +If complex services are involved in cross-application data access, you can use **DataShareExtensionAbility** to start the 
application of the data provider to implement data access. + +You need to implement flexible service logics via callbacks of the service provider. + + +## Working Principles + +There are two roles in **DataShare**: + +- Data provider: implements operations, such as adding, deleting, modifying, and querying data, and opening a file, using [DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md). + +- Data consumer: accesses the data provided by the provider using [createDataShareHelper()](../reference/apis/js-apis-data-dataShare.md#datasharecreatedatasharehelper). + +**Figure 1** Data sharing mechanism + +![dataShare](figures/dataShare.jpg) + +- The **DataShareExtensionAbility** module, as the data provider, implements services related to data sharing between applications. + +- The **DataShareHelper** module, as the data consumer, provides APIs for accessing data, including adding, deleting, modifying, and querying data. + +- The data consumer communicates with the data provider via inter-process communication (IPC). The data provider can be implemented through a database or other data storage. + +- The **ResultSet** module is implemented through shared memory. Shared memory stores the result sets, and interfaces are provided to traverse result sets. + + +## How to Develop + + +### Data Provider Application Development (Only for System Applications) + +[DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md) provides the following APIs. You can override these APIs as required. + +- **onCreate**: called by the server to initialize service logic when the DataShare client connects to the DataShareExtensionAbility server. + +- **insert**: called to insert data upon the request of the client. Data insertion must be implemented in this callback on the server. + +- **update**: called to update data upon the request of the client. Data update must be implemented in this callback on the server. 
+ +- **delete**: called to delete data upon the request of the client. Data deletion must be implemented in this callback on the server. + +- **query**: called to query data upon the request of the client. Data query must be implemented in this callback on the server. + +- **batchInsert**: called to batch insert data upon the request of the client. Batch data insertion must be implemented in this callback on the server. + +- **normalizeUri**: converts the URI provided by the client to the URI used by the server. + +- **denormalizeUri**: converts the URI used by the server to the initial URI passed by the client. + +Before implementing a **DataShare** service, you need to create a **DataShareExtensionAbility** object in the DevEco Studio project as follows: + +1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **DataShareExtAbility**. + +2. Right-click the **DataShareAbility** directory, and choose **New > TypeScript File** to create a file named **DataShareExtAbility.ts**. + +3. Import **@ohos.application.DataShareExtensionAbility** and other dependencies to the **DataShareExtAbility.ts** file, and +override the service implementation as required. For example, if the data provider provides only the data insertion, deletion, and query services, you can override only these APIs. + + ```js + import Extension from '@ohos.application.DataShareExtensionAbility'; + import rdb from '@ohos.data.relationalStore'; + import dataSharePredicates from '@ohos.data.dataSharePredicates'; + ``` + +4. Implement the data provider services. For example, implement data storage of the data provider by using a database, reading and writing files, or accessing the network. 
+ + ```js + const DB_NAME = 'DB00.db'; + const TBL_NAME = 'TBL00'; + const DDL_TBL_CREATE = "CREATE TABLE IF NOT EXISTS " + + TBL_NAME + + ' (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, age INTEGER, isStudent BOOLEAN, Binary BINARY)'; + + let rdbStore; + let result; + + export default class DataShareExtAbility extends Extension { + private rdbStore_; + + // Override onCreate(). + onCreate(want, callback) { + result = this.context.cacheDir + '/datashare.txt'; + // Create an RDB store. + rdb.getRdbStore(this.context, { + name: DB_NAME, + securityLevel: rdb.SecurityLevel.S1 + }, function (err, data) { + rdbStore = data; + rdbStore.executeSql(DDL_TBL_CREATE, [], (err) => { + console.info(`DataShareExtAbility onCreate, executeSql done err:${err}`); + }); + if (callback) { + callback(); + } + }); + } + + // Override query(). + query(uri, predicates, columns, callback) { + if (predicates === null || predicates === undefined) { + console.info('invalid predicates'); + } + try { + rdbStore.query(TBL_NAME, predicates, columns, (err, resultSet) => { + if (resultSet !== undefined) { + console.info(`resultSet.rowCount:${resultSet.rowCount}`); + } + if (callback !== undefined) { + callback(err, resultSet); + } + }); + } catch (err) { + console.error(`Failed to query. Code:${err.code},message:${err.message}`); + } + } + // Override other APIs as required. + }; + ``` + +5. Define **DataShareExtensionAbility** in **module.json5**. + + **Table 1** Fields in module.json5 + + | Field| Description| Mandatory| + | -------- | -------- | -------- | + | name | Ability name, corresponding to the **ExtensionAbility** class name derived from **Ability**.| Yes| + | type | Ability type. The value is **dataShare**, indicating the development is based on the **datashare** template.| Yes| + | uri | URI used for communication. It is the unique identifier for the data consumer to connect to the provider.| Yes| + | exported | Whether it is visible to other applications. 
Data sharing is allowed only when the value is **true**.| Yes| + | readPermission | Permission required for accessing data. If this parameter is not set, the read permission is not verified by default.| No| + | writePermission | Permission required for modifying data. If this parameter is not set, write permission verification is not performed by default.| No| + | metadata | Configuration for silent access, including the **name** and **resource** fields.
The **name** field identifies the configuration, which has a fixed value of **ohos.extension.dataShare**.
The **resource** field has a fixed value of **$profile:data_share_config**, which indicates that the profile name is **data_share_config.json**.| **metadata** is mandatory when the ability launch type is **singleton**. For details about the ability launch type, see **launchType** in the [Internal Structure of the abilities Attribute](../quick-start/module-structure.md#internal-structure-of-the-abilities-attribute).| + + **module.json5 example** + + ```json + "extensionAbilities": [ + { + "srcEntry": "./ets/DataShareExtAbility/DataShareExtAbility.ts", + "name": "DataShareExtAbility", + "icon": "$media:icon", + "description": "$string:description_datashareextability", + "type": "dataShare", + "uri": "datashare://com.samples.datasharetest.DataShare", + "exported": true, + "metadata": [{"name": "ohos.extension.dataShare", "resource": "$profile:data_share_config"}] + } + ] + ``` + + **Table 2** Fields in the data_share_config.json file + + | Field| Description | Mandatory| + | ------------ | ------------------------------------------------------------ | --- | + | tableConfig | Label configuration.| Yes| + | uri | Range for which the configuration takes effect. The URI supports the following formats in descending order by priority:
1. *****: indicates all databases and tables.
2. **datashare:///{*bundleName*}/{*moduleName*}/{*storeName*}**: specifies a database.
3. **datashare:///{*bundleName*}/{*moduleName*}/{*storeName*}/{*tableName*}**: specifies a table.
If URIs of different formats are configured, only the URI with higher priority takes effect. | Yes| + | crossUserMode | Whether data is shared by multiple users. The value **1** means to share data between multiple users, and the value **2** means the opposite. | Yes| + + **data_share_config.json Example** + + ```json + "tableConfig": [ + { + "uri": "*", + "crossUserMode": 1 + }, + { + "uri": "datashare:///com.acts.datasharetest/entry/DB00", + "crossUserMode": 1 + }, + { + "uri": "datashare:///com.acts.datasharetest/entry/DB00/TBL00", + "crossUserMode": 2 + } + ] + ``` + + +### Data Consumer Application Development + +1. Import the dependencies. + + ```js + import UIAbility from '@ohos.app.ability.UIAbility'; + import dataShare from '@ohos.data.dataShare'; + import dataSharePredicates from '@ohos.data.dataSharePredicates'; + ``` + +2. Define the URI string for communicating with the data provider. + + ```js + // Different from the URI defined in the module.json5 file, the URI passed in the parameter has an extra slash (/), because there is a DeviceID parameter between the second and the third slash (/). + let dseUri = ('datashare:///com.samples.datasharetest.DataShare'); + ``` + +3. Create a **DataShareHelper** instance. + + ```js + let dsHelper; + let abilityContext; + + export default class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage) { + abilityContext = this.context; + dataShare.createDataShareHelper(abilityContext, dseUri, (err, data) => { + dsHelper = data; + }); + } + } + ``` + +4. Use the APIs provided by **DataShareHelper** to access the services provided by the provider, for example, adding, deleting, modifying, and querying data. + + ```js + // Construct a piece of data. 
+ let valuesBucket = { 'name': 'ZhangSan', 'age': 21, 'isStudent': false, 'Binary': new Uint8Array([1, 2, 3]) }; + let updateBucket = { 'name': 'LiSi', 'age': 18, 'isStudent': true, 'Binary': new Uint8Array([1, 2, 3]) }; + let predicates = new dataSharePredicates.DataSharePredicates(); + let valArray = ['*']; + // Insert a piece of data. + dsHelper.insert(dseUri, valuesBucket, (err, data) => { + console.info(`dsHelper insert result:${data}`); + }); + // Update data. + dsHelper.update(dseUri, predicates, updateBucket, (err, data) => { + console.info(`dsHelper update result:${data}`); + }); + // Query data. + dsHelper.query(dseUri, predicates, valArray, (err, data) => { + console.info(`dsHelper query result:${data}`); + }); + // Delete data. + dsHelper.delete(dseUri, predicates, (err, data) => { + console.info(`dsHelper delete result:${data}`); + }); + ``` diff --git a/en/application-dev/database/share-data-by-silent-access.md b/en/application-dev/database/share-data-by-silent-access.md new file mode 100644 index 0000000000000000000000000000000000000000..142642f98646003c675fcbd15d9369b6664948a6 --- /dev/null +++ b/en/application-dev/database/share-data-by-silent-access.md @@ -0,0 +1,46 @@ +# Data Sharing Through Silent Access + + +## When to Use + +According to big data statistics, in a typical cross-application data access scenario, applications are started nearly 83 times on average in a day. + +To reduce the number of application startup times and improve the access speed, OpenHarmony provides the silent access feature, which allows direct access to the database without starting the data provider. + +Silent access supports only basic database access. If service processing is required, implement service processing in the data consumer. + +If the service processing is complex, use **DataShareExtensionAbility** to start the data provider. 
+ + +## Working Principles + +**Figure 1** Silent access + +![silent_dataShare](figures/silent_dataShare.jpg) + +- In silent access, **DatamgrService** obtains the access rules configured by the data provider through directory mapping, performs preprocessing based on rules, and accesses the database. + +- To use silent access, the URIs must be in the following format: + datashare:///{bundleName}/{moduleName}/{storeName}/{tableName}?Proxy=true + + "Proxy=true" means to access data without starting the data provider. If **Proxy** is not set to **true**, the data provider is started. + + The **DatamgrService** obtains the data provider application based on **bundleName**, reads the configuration, verifies the permission, and accesses data. + + +## Constraints + +- Currently, only RDB stores support silent access. + +- The system supports a maximum of 16 concurrent query operations. Excess query requests need to be queued for processing. + +- A proxy cannot be used to create a database. If a database needs to be created, the data provider must be started. + + +## How to Develop + +The URI must be in the following format: + +datashare:///{bundleName}/{moduleName}/{storeName}/{tableName}?Proxy=true + +For details about the development procedure and implementation, see [Sharing Data Using DataShareExtensionAbility](share-data-by-datashareextensionability.md). diff --git a/en/application-dev/database/share-device-data-across-apps-overview.md b/en/application-dev/database/share-device-data-across-apps-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..6392e579a8e078e4ff41751aa2ebcc1481ac58a0 --- /dev/null +++ b/en/application-dev/database/share-device-data-across-apps-overview.md @@ -0,0 +1,47 @@ +# Cross-Application Data Sharing Overview + +## Function + +The application data on a device, such as the Contacts, short message service (SMS), and Gallery data, always needs to be shared with other applications. 
However, certain data, such as the accounts and passwords, cannot be shared. Certain data, such as SMS messages, can be accessed but not modified by other applications. The **DataShare** module provides a secure and easy-to-use mechanism for sharing data of an application with other applications on the same device. + +## Basic Concepts + +Before developing cross-application data sharing on a device, understand the following concepts: + +- Data provider: an application that provides data and implements related services. It is also called the data producer or server. + +- Data consumer: an application that accesses the data or services provided by the data provider. It is also called the client. + +- **ValuesBucket**: a set of data to be inserted. It can be one or more data records in KV pairs. In each KV pair, the key must be of the string type, and the value can be a number, a string, a Boolean value, or an unsigned integer array. + +- **ResultSet**: a set of query results. It provides flexible modes for obtaining various data. + +- **Predicates**: an object that specifies the conditions for updating, deleting, or querying data in a database. + + +## Implementation + +The data provider can directly use **DataShare** to share data with other applications without complex encapsulation. The data consumer only needs to use a set of APIs to access the data, because the **DataShare** access mode does not vary with the data provisioning mode. This greatly reduces the learning time and development difficulty. + +The cross-application data sharing can be implemented in either of the following ways: + +- **DataShareExtensionAbility** + + You can implement an ExtensionAbility with a callback in the HAP. When the data consumer calls an API, the ExtensionAbility of the data provider will be automatically started to invoke the registered callback. 
+ + This method is recommended when the cross-application data access involves service operations other than mere addition, deletion, modification, and query of data in databases. + +- Silent access + + You can configure database access rules in the HAP. When the data consumer calls an API, the system ability automatically obtains the access rules in the HAP and returns data without starting the data provider. + + This method is recommended when the cross-application data access involves only the operations for adding, deleting, modifying, and querying data in databases. + + +## Constraints + +- **DataShare** is subject to the limitations on the database used by the data provider. The supported data models, length of the keys and values, and maximum number of databases that can be accessed at a time by each application vary with the database in use. + +- The payloads of **ValuesBucket**, **Predicates**, and **ResultSet** are restricted by inter-process communication (IPC). + +- Currently, **dataShare** supports development based on the stage model only. diff --git a/en/application-dev/database/sync-app-data-across-devices-overview.md b/en/application-dev/database/sync-app-data-across-devices-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..875b70fef7d4b3bcd6b78a2102ec40c80d7da5f9 --- /dev/null +++ b/en/application-dev/database/sync-app-data-across-devices-overview.md @@ -0,0 +1,49 @@ +# Overview of Distributed Application Data Synchronization + + +## When to Use + +The distributed application data synchronization allows the data of an application to be synchronized with other devices that are connected to form a Virtual Device. This feature enables seamless synchronization, modification, and query of use application data across trusted devices. + +For example, when data is added, deleted, or modified for an application on a device, the same application on another device can obtain the updated data. 
You can use this feature in the distributed Gallery, Notepad, Contacts, and File Manager. + +For details about how to subscribe to database change notifications between different applications, see [Sharing Application Data with Other Applications](share-device-data-across-apps-overview.md). + +The data storage modes vary depending on the lifecycle of data to be synchronized: + +- Temporary data has a short lifecycle and is usually stored in memory. For example, distributed data objects are recommended for process data generated by game applications. + +- Persistent data has a long lifecycle and needs to be stored in databases. You can use RDB stores or KV stores based on data characteristics and relationships. For example, RDB stores are recommended for storing Gallery attribute information, such as albums, covers, and images, and KV stores are recommended for storing Gallery image thumbnails. + + +## Basic Concepts + +In a distributed scenario, cross-device collaboration demands consistent data between the devices in the same network. + + +The data consistency can be classified into the following types: + + +- Strong consistency: When data is inserted, deleted, or modified on a device, other devices in the same network can obtain the updates eventually, but may not immediately. + +- Weak consistency: When data is added, deleted, or modified on a device, other devices in the same network may or may not obtain the updates. The data on these devices may be inconsistent after a certain period of time. + +- Eventual consistency: When data is added, deleted, or modified on a device, other devices in the same network may not obtain the updates immediately. However, data on these devices will become consistent after a certain period of time. + + +Strong consistency has high requirements on distributed data management and may be used in distributed server deployment. 
Because mobile devices are not always online and there is no central node, the cross-device application data synchronization supports eventual consistency only. + + +## Access Control Mechanism in Cross-Device Synchronization + +In the application data synchronization across devices, data access is controlled based on the device level and [data security label](access-control-by-device-and-data-level.md#data-security-labels). In principle, data can be synchronized only to the devices whose data security labels are not higher than the device's security level. The access control matrix is as follows: + +|Device Security Level|Data Security Labels of the Synchornizable Device| +|---|---| +|SL1|S1| +|SL2|S1 to S2| +|SL3|S1 to S3| +|SL4|S1 to S4| +|SL5|S1 to S4| + +For example, the security level of development boards RK3568 and Hi3516 is SL1. The database with data security label S1 can be synchronized with RK3568 and Hi3516, but the database with database labels S2-S4 cannot. diff --git a/en/application-dev/device/inputdevice-guidelines.md b/en/application-dev/device/inputdevice-guidelines.md index da6eef71d750b74e01d1ea8a9eaaf49b1bf598cb..c15955d9b01239605d0ce1afa9bfe5f693b22940 100644 --- a/en/application-dev/device/inputdevice-guidelines.md +++ b/en/application-dev/device/inputdevice-guidelines.md @@ -29,7 +29,6 @@ When a user enters text, the input method determines whether to launch the virtu 1. Call the **getDeviceList** API to obtain the list of connected input devices. Call the **getKeyboardType** API to traverse all connected devices to check whether a physical keyboard exists. If a physical keyboard exists, mark the physical keyboard as connected. This step ensures that your application detects all inserted input devices before listening for device hot swap events. 2. Call the **on** API to listen for device hot swap events. If a physical keyboard is inserted, mark the physical keyboard as connected. 
If a physical keyboard is removed, mark the physical keyboard as disconnected. -3. When a user enters text, check whether a physical keyboard is connected. If a physical keyboard is not connected, launch the virtual keyboard. ```js @@ -65,6 +64,4 @@ try { } catch (error) { console.log(`Execute failed, error: ${JSON.stringify(error, [`code`, `message`])}`); } - // 3. Determine whether to launch the virtual keyboard based on the value of isPhysicalKeyboardExist. - // TODO ``` diff --git a/en/application-dev/device/usb-guidelines.md b/en/application-dev/device/usb-guidelines.md index c4f5131536a1f0b55ae973bdf7cdf04d2b8f0980..68c8c3de013e75d56854bf0cf0e3a71aca9eb261 100644 --- a/en/application-dev/device/usb-guidelines.md +++ b/en/application-dev/device/usb-guidelines.md @@ -130,8 +130,6 @@ You can set a USB device as the USB host to connect to other USB devices for dat usb.bulkTransfer(pipe, inEndpoint, dataUint8Array, 15000).then(dataLength => { if (dataLength >= 0) { console.info("usb readData result Length : " + dataLength); - let resultStr = this.ab2str(dataUint8Array); // Convert uint8 data into a string. - console.info("usb readData buffer : " + resultStr); } else { console.info("usb readData failed : " + dataLength); } diff --git a/en/application-dev/dfx/apprecovery-guidelines.md b/en/application-dev/dfx/apprecovery-guidelines.md index 4a82385816bd7f51ca0dadf85aff9961f84b94ca..e5e7b5274361a25009d33b694943b59ef2d9d8a1 100644 --- a/en/application-dev/dfx/apprecovery-guidelines.md +++ b/en/application-dev/dfx/apprecovery-guidelines.md @@ -9,7 +9,7 @@ Application recovery helps to restore the application state and save temporary d In API version 9, application recovery is supported only for a single ability of the application developed using the stage model. Application state saving and automatic restart are performed when a JsError occurs. 
-In API version 10, application recovery is also supported for multiple abilities of the application developed using the stage model. Application state storage and restore are performed when an AppFreeze occurs. If an application is killed in control mode, the application state will be restored upon next startup. +In API version 10, application recovery is applicable to multiple abilities of an application developed using the stage model. Application state storage and restore are performed when an AppFreeze occurs. If an application is killed in control mode, the application state will be restored upon next startup. ## Available APIs @@ -37,18 +37,15 @@ No error will be thrown if the preceding APIs are used in the troubleshooting sc ### Application State Management Since API version 10, application recovery is not limited to automatic restart in the case of an exception. Therefore, you need to understand when the application will load the saved state. - If the last exit of an application is not initiated by a user and a saved state is available for recovery, the startup reason is set to **APP_RECOVERY** when the application is started by the user next time, and the recovery state of the application is cleared. The application recovery status flag is set when **saveAppState** is actively or passively called. The flag is cleared when the application exits normally or the saved state is consumed. (A normal exit is usually triggered by pressing the back key or clearing recent tasks.) ![Application recovery status management](./figures/application_recovery_status_management.png) ### Application State Saving and Restore - API version 10 or later supports saving of the application state when an application is suspended. If a JsError occurs, **onSaveState** is called in the main thread. If an AppFreeze occurs, however, the main thread may be suspended, and therefore **onSaveState** is called in a non-main thread. The following figure shows the main service flow. 
![Application recovery from the freezing state](./figures/application_recovery_from_freezing.png) - When the application is suspended, the callback is not executed in the JS thread. Therefore, you are advised not to use the imported dynamic Native library or access the **thread_local** object created by the main thread in the code of the **onSaveState** callback. ### Framework Fault Management @@ -62,13 +59,9 @@ Fault management is an important way for applications to deliver a better user e - Fault query is the process of calling APIs of [faultLogger](../reference/apis/js-apis-faultLogger.md) to obtain the fault information. The figure below does not illustrate the time when [faultLogger](../reference/apis/js-apis-faultLogger.md) is called. You can refer to the [LastExitReason](../reference/apis/js-apis-app-ability-abilityConstant.md#abilityconstantlastexitreason) passed during application initialization to determine whether to call [faultLogger](../reference/apis/js-apis-faultLogger.md) to query information about the previous fault. - ![Fault rectification process](./figures/fault_rectification.png) - It is recommended that you call [errorManager](../reference/apis/js-apis-app-ability-errorManager.md) to handle the exception. After the processing is complete, you can call the **saveAppState** API and restart the application. - If you do not register [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) or enable application recovery, the application process will exit according to the default processing logic of the system. Users can restart the application from the home screen. - If you have enabled application recovery, the recovery framework first checks whether application state saving is supported and whether the application state saving is enabled. 
If so, the recovery framework invokes [onSaveState](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonsavestate) of the [Ability](../reference/apis/js-apis-app-ability-uiAbility.md). Finally, the application is restarted. ### Supported Application Recovery Scenarios @@ -132,12 +125,12 @@ import AbilityConstant from '@ohos.app.ability.AbilityConstant'; #### Actively Saving the Application State and Restoring Data -- Define and register the [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) callback. +- Define and register the [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) callback. For details about its usage, see [errorManager](../reference/apis/js-apis-app-ability-errorManager.md). ```ts var registerId = -1; var callback = { - onUnhandledException: function (errMsg) { + onUnhandledException(errMsg) { console.log(errMsg); appRecovery.saveAppState(); appRecovery.restartApp(); @@ -149,7 +142,7 @@ import AbilityConstant from '@ohos.app.ability.AbilityConstant'; console.log("[Demo] EntryAbility onWindowStageCreate") globalThis.registerObserver = (() => { - registerId = errorManager.registerErrorObserver(callback); + registerId = errorManager.on('error', callback); }) windowStage.loadContent("pages/index", null); @@ -165,7 +158,7 @@ After the callback triggers **appRecovery.saveAppState()**, **onSaveState(state, // Ability has called to save app data console.log("[Demo] EntryAbility onSaveState") wantParams["myData"] = "my1234567"; - return AbilityConstant.onSaveResult.ALL_AGREE; + return AbilityConstant.OnSaveResult.ALL_AGREE; } ``` @@ -195,8 +188,8 @@ onWindowStageDestroy() { console.log("[Demo] EntryAbility onWindowStageDestroy") globalThis.unRegisterObserver = (() => { - errorManager.unregisterErrorObserver(registerId, (result) => { - console.log("[Demo] result " + result.code + ";" + result.message) + errorManager.off('error', registerId, (err) => { + console.error("[Demo] err:", err); }); }) } 
@@ -224,7 +217,24 @@ export default class EntryAbility extends Ability { // Ability has called to save app data console.log("[Demo] EntryAbility onSaveState") wantParams["myData"] = "my1234567"; - return AbilityConstant.onSaveResult.ALL_AGREE; + return AbilityConstant.OnSaveResult.ALL_AGREE; + } +} +``` + +#### Restart Flag for the Failed Ability + +If the failed ability is restarted again, the [ABILITY_RECOVERY_RESTART](../reference/apis/js-apis-app-ability-wantConstant.md#wantconstantparams) flag will be added as a **parameters** member for the **want** parameter in **onCreate** and its value is **true**. + +```ts +import UIAbility from '@ohos.app.ability.UIAbility'; +import wantConstant from '@ohos.app.ability.wantConstant'; +export default class EntryAbility extends UIAbility { + onCreate(want, launchParam) { + if (want.parameters[wantConstant.Params.ABILITY_RECOVERY_RESTART] != undefined && + want.parameters[wantConstant.Params.ABILITY_RECOVERY_RESTART] == true) { + console.log("This ability need to recovery"); + } } } ``` diff --git a/en/application-dev/dfx/errormanager-guidelines.md b/en/application-dev/dfx/errormanager-guidelines.md index 4679cfcfc78893590fe73eab770e49fc68a1a828..8509fff09e51cd31665e80fab6dce0f2472ab08d 100644 --- a/en/application-dev/dfx/errormanager-guidelines.md +++ b/en/application-dev/dfx/errormanager-guidelines.md @@ -23,7 +23,8 @@ When an asynchronous callback is used, the return value can be processed directl | API | Description | | ------------------------------ | ------------------------------------------------------------ | -| onUnhandledException(errMsg: string): void | Called when an application generates an uncaught exception after being registered.| +| onUnhandledException(errMsg: string): void | Called when an uncaught exception is reported after the application is registered.| +| onException?(errObject: Error): void | Called when an application exception is reported to the JavaScript layer after the application is 
registered.| ### Result Codes for Unregistering an Observer @@ -43,6 +44,13 @@ let registerId = -1; let callback = { onUnhandledException: function (errMsg) { console.log(errMsg); + }, + onException: function (errorObj) { + console.log('onException, name: ', errorObj.name); + console.log('onException, message: ', errorObj.message); + if (typeof(errorObj.stack) === 'string') { + console.log('onException, stack: ', errorObj.stack); + } } } diff --git a/en/application-dev/dfx/figures/fault_rectification.png b/en/application-dev/dfx/figures/fault_rectification.png index e5831ac2b5aefc33a955ad98cd76f41ad28a7f70..a178b2691616d406d2668806ffcd4f89c8ca82a3 100644 Binary files a/en/application-dev/dfx/figures/fault_rectification.png and b/en/application-dev/dfx/figures/fault_rectification.png differ diff --git a/en/application-dev/dfx/hiappevent-guidelines.md b/en/application-dev/dfx/hiappevent-guidelines.md index 640b9185ee236dbe0fb5dfe3808b14322a401a23..569b16d587af811d32e425a534ab4dc0df6a4be6 100644 --- a/en/application-dev/dfx/hiappevent-guidelines.md +++ b/en/application-dev/dfx/hiappevent-guidelines.md @@ -146,9 +146,3 @@ The following example illustrates how to log and subscribe to button click event HiAppEvent eventPkg.size=124 HiAppEvent eventPkg.info={"domain_":"button","name_":"click","type_":4,"time_":1670268234523,"tz_":"+0800","pid_":3295,"tid_":3309,"click_time":100} ``` - -## Samples - -The following sample is provided to help you better understand how to develop the application event logging feature: - -- [`JsDotTest`: Event Logging (JS) (API8)](https://gitee.com/openharmony/applications_app_samples/tree/master/DFX/JsDotTest) diff --git a/en/application-dev/faqs/Readme-EN.md b/en/application-dev/faqs/Readme-EN.md index 7eb9cad6b546996a47e92cd01b03f783a1f4a6d2..740e2bbdaf4a1aadc015c9e683da5f02940ab61a 100644 --- a/en/application-dev/faqs/Readme-EN.md +++ b/en/application-dev/faqs/Readme-EN.md @@ -2,21 +2,20 @@ - [Programming Languages](faqs-language.md) - 
[Ability Framework Development](faqs-ability.md) -- [Bundle Management Development](faqs-bundle.md) +- [Resource Manager Development](faqs-globalization.md) - [ArkUI (ArkTS) Development](faqs-ui-ets.md) - [ArkUI Web Component (ArkTS) Development](faqs-web-arkts.md) - [ArkUI (JavaScript) Development](faqs-ui-js.md) - [Common Event and Notification Development](faqs-event-notification.md) - [Graphics and Image Development](faqs-graphics.md) -- [File Management Development](faqs-file-management.md) - [Media Development](faqs-media.md) -- [Network and Connection Development](faqs-connectivity.md) -- [Data Management Development](faqs-data-management.md) -- [Device Management Development](faqs-device-management.md) +- [Basic Security Capability Development](faqs-security.md) +- [Application Access Control Development](faqs-ability-access-control.md) +- [Data Management Development](faqs-distributed-data-management.md) +- [File Management Development](faqs-file-management.md) +- [Network Management Development](faqs-network-management.md) - [DFX Development](faqs-dfx.md) -- [Intl Development](faqs-international.md) - [Native API Usage](faqs-native.md) +- [Startup Development](faqs-startup.md) - [Usage of Third- and Fourth-Party Libraries](faqs-third-party-library.md) - [IDE Usage](faqs-ide.md) -- [hdc_std Command Usage](faqs-hdc-std.md) -- [Development Board](faqs-development-board.md) \ No newline at end of file diff --git a/en/application-dev/faqs/faqs-ability-access-control.md b/en/application-dev/faqs/faqs-ability-access-control.md new file mode 100644 index 0000000000000000000000000000000000000000..f336120a2b131eb75604fa3e51b1b8ff0130072e --- /dev/null +++ b/en/application-dev/faqs/faqs-ability-access-control.md @@ -0,0 +1,7 @@ +# Application Access Control Development + +## Can the app listen for the permission change after its permission is modified in Settings? 
+ +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +Third-party apps cannot listen for the permission change. diff --git a/en/application-dev/faqs/faqs-bundle.md b/en/application-dev/faqs/faqs-bundle.md index 61a5277c6d4a1493d0281fdd66b88a99a07141ae..fda41c42bccc357d6b8800ce3f5401e1e2abbceb 100644 --- a/en/application-dev/faqs/faqs-bundle.md +++ b/en/application-dev/faqs/faqs-bundle.md @@ -14,7 +14,7 @@ Applicable to: OpenHarmony SDK 3.2.3.5, stage model of API version 9 Obtain the bundle name through **context.abilityInfo.bundleName**. -Reference: [AbilityContext](../reference/apis/js-apis-ability-context.md) and [AbilityInfo](../reference/apis/js-apis-bundle-AbilityInfo.md) +Reference: [AbilityInfo](../reference/apis/js-apis-bundle-AbilityInfo.md) ## How do I obtain an application icon? diff --git a/en/application-dev/faqs/faqs-connectivity.md b/en/application-dev/faqs/faqs-connectivity.md deleted file mode 100644 index 31e1db2e15e82875427d52a92dd26bcfeb69c34e..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-connectivity.md +++ /dev/null @@ -1,33 +0,0 @@ -# Network and Connection Development - - - -## What are the data formats supported by extraData in an HTTP request? - -Applicable to: OpenHarmony SDK 3.2.2.5, stage model of API version 9 - -**extraData** indicates additional data in an HTTP request. It varies depending on the HTTP request method. - -- If the HTTP request uses a POST or PUT method, **extraData** serves as the content of the HTTP request. - -- If the HTTP request uses a GET, OPTIONS, DELETE, TRACE, or CONNECT method, **extraData** serves as a supplement to the HTTP request parameters and will be added to the URL when the request is sent. - -- If you pass in a string object, **extraData** contains the string encoded on your own. - - -## What does error code 28 mean for an HTTP request? 
- -Applicable to: OpenHarmony SDK 3.2.2.5, stage model of API version 9 - -Error code 28 refers to **CURLE_OPERATION_TIMEDOUT**, which means a cURL operation timeout. For details, see any HTTP status code description available. - -Reference: [Response Codes](../reference/apis/js-apis-http.md#responsecode) and [Curl Error Codes](https://curl.se/libcurl/c/libcurl-errors.html) - - -## What does error code 6 mean for the response of \@ohos.net.http.d.ts? - -Applicable to: OpenHarmony SDK 3.2.3.5 - -Error code 6 indicates a failure to resolve the host in the address. You can ping the URL carried in the request to check whether the host is accessible. - -Reference: [Response Codes](../reference/apis/js-apis-http.md#responsecode) and [Curl Error Codes](https://curl.se/libcurl/c/libcurl-errors.html) diff --git a/en/application-dev/faqs/faqs-data-management.md b/en/application-dev/faqs/faqs-data-management.md deleted file mode 100644 index 47f0b7ce20cd54a1cee4eb521801d4e7ca94e04b..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-data-management.md +++ /dev/null @@ -1,76 +0,0 @@ -# Data Management Development - -## How Do I Save PixelMap Data to a Database? - -Applicable to: OpenHarmony SDK 3.2.3.5 - -You can convert a **PixelMap** into an **ArrayBuffer** and save the **ArrayBuffer** to your database. - -Reference: [readPixelsToBuffer](../reference/apis/js-apis-image.md#readpixelstobuffer7-1) - -## How Do I Obtain RDB Store Files? - -Applicable to: OpenHarmony SDK 3.2.3.5, stage model of API version 9 - -Run the hdc_std command to copy the .db, .db-shm, and .db-wal files in **/data/app/el2/100/database/*bundleName*/entry/db/**, and then use the SQLite tool to open the files. - -Example: - -``` - hdc_std file recv /data/app/el2/100/database/com.xxxx.xxxx/entry/db/test.db ./test.db -``` - -## Does the Database Has a Lock Mechanism? 
- -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -The distributed data service (DDS), relational database (RDB) store, and preferences provided OpenHarmony have a lock mechanism. You do not need to bother with the lock mechanism during the development. - -## What Is a Transaction in an RDB Store? - -Applicable to: all versions - -When a large number of operations are performed in an RDB store, an unexpected exception may cause a failure of some data operations and loss of certain data. As a result, the application may become abnormal or even crash. - -A transaction is a group of tasks serving as a single logical unit. It eliminates the failure of some of the operations and loss of associated data. - -## What Data Types Does an RDB Store Support? - -Applicable to: OpenHarmony SDK 3.0 or later, stage model of API version 9 - -An RDB store supports data of the number, string, and Boolean types. The number array supports data of the Double, Long, Float, Int, or Int64 type, with a maximum precision of 17 decimal digits. - -## How Do I View Database db Files? - -Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 - -1. Run the **hdc_std shell** command. - -2. Obtain the absolute path or sandbox path of the database. - -The absolute path is **/data/app/el2//database/**. The default **** is **100**. - -To obtain the sandbox path, run the **ps -ef | grep hapName** command to obtain the process ID of the application. - -The database sandbox path is **/proc//root/data/storage/el2/database/**. - -3. Run the **find ./ -name "\*.db"** command in the absolute path or sandbox path of the database. - -## How Do I Store Long Text Data? - -Applicable to: OpenHarmony SDK 3.2.5.5, API version 9 - -- Preferences support a string of up to 8192 bytes. - -- The KV store supports a value of up to 4 MB. 
- -Reference: [Preference Overview](../database/database-preference-overview.md) and [Distributed Data Service Overview](../database/database-mdds-overview.md) - -## How Do I Develop DataShare on the Stage Model - -Applicable to: OpenHarmony SDK 3.2.5.5, API version 9 - -The DataShare on the stage model cannot be used with the **DataAbility** for the FA model. The connected server application must be implemented by using **DataShareExtensionAbility**. - -Reference: [DataShare Development](../database/database-datashare-guidelines.md) - diff --git a/en/application-dev/faqs/faqs-development-board.md b/en/application-dev/faqs/faqs-development-board.md deleted file mode 100644 index 0a2a29db5ba68e57e2eee790485ae682ac78b6c0..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-development-board.md +++ /dev/null @@ -1,50 +0,0 @@ -# Development Board Usage - -## How do I take screenshots on a development board? - -Applicable to: OpenHarmony SDK 3.2.2.5, stage model of API version 9 - -- Method 1: Click the screenshot button in the Control Panel from the development board UI. The screenshot is displayed in Gallery. - -- Method 2: Run the screenshot script. Connect to the development board to a computer running Windows. Create a text file on the computer, copy the following script content to the file, change the file name extension to **.bat** (the HDC environment variables must be configured in advance), and click **Run**. The screenshot is saved to the same directory as the **.bat** script file. - Example: - - ``` - set filepath=/data/%date:~0,4%%date:~5,2%%date:~8,2%%time:~1,1%%time:~3,2%%time:~6,2%.png - echo %filepath% - : pause - hdc_std shell snapshot_display -f %filepath% - : pause - hdc_std file recv %filepath% . - : pause - ``` - -## How do I adjust Previewer in DevEco Studio so that the preview looks the same as what's displayed on a real RK3568 development board? - -Applicable to: DevEco Studio 3.0.0.991 - -1. 
Create a profile in Previewer. - - ![en-us_image_0000001361254285](figures/en-us_image_0000001361254285.png) - -2. Set the profile parameters as follows: - - Device type : default - - Resolution: 720\*1280 - - DPI: 240 - -## What should I do if Device Manager incorrectly identifies a development board as FT232R USB UART even when the development board already has a driver installed? - -Possible cause: The USB serial driver of the development version is not installed. - -Solution: Search for **FT232R USB UART**, and download and install the driver. - -## How do I complete authentication when logging in to the development board? - -Applicable to: OpenHarmony SDK 3.2.2.5 - -When connecting to the network that requires authentication, open any web page in the browser to access the authentication page. - -If there is no browser on the development board, you can install the [sample browser application](https://gitee.com/openharmony/app_samples/tree/master/device/Browser). diff --git a/en/application-dev/faqs/faqs-device-management.md b/en/application-dev/faqs/faqs-device-management.md deleted file mode 100644 index ea71edd6c9940437e197be35e60a6638c73ae88d..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-device-management.md +++ /dev/null @@ -1,48 +0,0 @@ -# Device Management Development - -## How do I obtain the DPI of a device? - -Applicable to: OpenHarmony 3.2 Beta5, stage model of API version 9 - -Import the **@ohos.display** module and call the **getDefaultDisplaySync** API. - -**Example** - -``` -import display from '@ohos.display'; -let displayClass = null; -try { - displayClass = display.getDefaultDisplaySync(); - console.info('Test densityDPI:' + JSON.stringify(data.densityDPI)); -} catch (exception) { - console.error('Failed to obtain the default display object. Code: ' + JSON.stringify(exception)); -} -``` - -## How do I obtain the type of the device where the application is running? 
- -Applicable to: OpenHarmony SDK 3.2.2.5, stage model of API version 9 - -Import the **\@ohos.deviceInfo** module and call the **deviceInfo.deviceType** API. - -For details, see [Device Information](../reference/apis/js-apis-device-info.md). - -## How do I obtain the system version of a device? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Use the **osFullName** attribute of the [deviceInfo](../reference/apis/js-apis-device-info.md) object. - -## How do I obtain the UDID of an OpenHarmony device? - -Applicable to: OpenHarmony SDK3.0, stage model of API version 9 - -- To obtain the UDID of the connected device, run the **hdc shell bm get --udid** command. - -- For details about how to obtain the UDID from code, see [udid](../reference/apis/js-apis-device-info.md). - -## How do I develop a shortcut key function? - -Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 - -To develop a shortcut key function, use the APIs in [Input Consumer](../reference/apis/js-apis-inputconsumer.md). diff --git a/en/application-dev/faqs/faqs-dfx.md b/en/application-dev/faqs/faqs-dfx.md index 9fc12a1b1c26e109240702cca50e50b77495bdf5..51945bd8d0b9742703696d19fd2cc1f52add112d 100644 --- a/en/application-dev/faqs/faqs-dfx.md +++ b/en/application-dev/faqs/faqs-dfx.md @@ -1,54 +1,27 @@ # DFX Development -## How do I locate the fault when the application crashes? +## How do I flush HiLog information to disks? -Applicable to: OpenHarmony SDK 3.2.5.5 +Applicable to: OpenHarmony 3.2 Beta (API version 9) -1. Locate the crash-related code based on the service log. +**Symptom** -2. View the error information in the crash file, which is located at **/data/log/faultlog/faultlogger/**. +How do I flush HiLog information to disks? -## Why cannot access controls in the UiTest test framework? +**Solution** -Applicable to: OpenHarmony SDK 3.2.5.5 +Run the **hilog -w start -f ckTest -l 1M -n 5 -m zlib -j 11** command. 
-Check whether **persist.ace.testmode.enabled** is turned on. +The log file is saved in the **/data/log/hilog/** directory. -Run **hdc\_std shell param get persist.ace.testmode.enabled**. +Parameter description: -If the value is **0**, run the **hdc\_std shell param set persist.ace.testmode.enabled 1** to enable the test mode. - - -## Why is private displayed in logs when the format parameter type of HiLog in C++ code is %d or %s? - -When format parameters such as **%d** and **%s** are directly used, the standard system uses **private** to replace the actual data for printing by default to prevent data leakage. To print the actual data, replace **%d** with **%{public}d** or replace **%s** with **%{public}s**. - -## What should I do if the hilog.debug log cannot be printed? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Run **hdc_std shell hilog -b D** to turn on the debugging switch. - -## Is HiLog or console recommended for log printing? How do I set the domain if HiLog is used? - -Applicable to: OpenHarmony SDK 3.2.2.5 - -You are advised to use the [HiLog](../reference/apis/js-apis-hilog.md) for log printing. For details about how to set the **domain** parameter, see the [Development Guide](../reference/apis/js-apis-hilog.md#hilogisloggable). - -## What is the maximum length of a log record when HiLog is used? Is it configurable? - -Applicable to: OpenHarmony SDK 3.2.2.5 - -The maximum length of a log record is 1,024 characters, and it is not changeable. - -## Can I separate multiple strings by spaces in the tag parameter of the HiLog API? - -Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 - -No. Separating multiple strings by spaces is not allowed. - -## How do I print real data if HiLog does not contain data labeled by {public}? - -Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 - -Run **hdc\_std shell hilog -p off** to disable logging of data labeled by {public}. 
+``` +-**-w**: Starts a log flushing task. **start** means to start the task, and **stop** means to stop the task. +-**-f**: Sets the log file name. +-**-l**: Sets the size of a single log file. The unit can be B, KB, MB, or GB. +-**-n**: Sets the maximum number of log files. When the number of log files exceeds the specified value, the earliest log file will be overwritten. The value range is [2,1000]. +-**-m**: Specifies the log file compression algorithm. +-**-j**: Specifies the task ID. The value ranges from **10** to **0xffffffffff**. +For more details about parameters, run the **hilog --help** command. +``` diff --git a/en/application-dev/faqs/faqs-distributed-data-management.md b/en/application-dev/faqs/faqs-distributed-data-management.md new file mode 100644 index 0000000000000000000000000000000000000000..c44b7b254ae85280e00430621845dd82d7e2fca6 --- /dev/null +++ b/en/application-dev/faqs/faqs-distributed-data-management.md @@ -0,0 +1,101 @@ +# Data Management Development + + +## How do I encrypt an RDB store? + +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +**Solution** + +To encrypt an RDB store, set **encrypt** in **StoreConfig** to **true** when creating the RDB store. + +**Reference** + +[RDB Store](../reference/apis/js-apis-data-relationalStore.md#storeconfig) + +## What if I failed to clear a table in an RDB store using TRUNCATE TABLE? + +Applicable to: OpenHarmony SDK 3.2.9.2 (API version 9) + +**Symptom** + +An error is reported when the **TRUNCATE TABLE** statement is used to clear table data. + +**Solution** + +The RDB store uses SQLite and does not support the **TRUNCATE TABLE** statement. To clear a table in an RDB store, use the **DELETE** statement, for example, **DELETE FROM sqlite\_sequence WHERE name = 'table\_name'**. + + + +## What data types does an RDB store support? 
+ +Applicable to: OpenHarmony SDK 3.0 or later, API version 9 stage model + +**Solution** + +An RDB store supports data of the number, string, and Boolean types. The number type supports data of the Double, Long, Float, Int, or Int64 type, with a maximum precision of 17 decimal digits. + +## How do I save pixel map data to a database? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Symptom** + +Pixel map data fails to be stored. + +**Solution** + +Convert the pixel map data into an **ArrayBuffer** and save the **ArrayBuffer** to your database. + +**Reference** + +[readPixelsToBuffer](../reference/apis/js-apis-image.md#readpixelstobuffer7-1) + +## How do I obtain RDB store files? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +The RDB store files are stored in **/data/app/el2/100/database/*Bundle_name*/entry/rdb/**. You can use the hdc command to copy the file from the directory and use a SQLite tool to open the file. + +Example: + +``` + hdc file recv /data/app/el2/100/database//entry/db/ ./ +``` + +## Do the OpenHarmony databases have a lock mechanism? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +The distributed data service (DDS), RDB store, and preferences provided OpenHarmony have a lock mechanism. You do not need to bother with the lock mechanism during the development. + +## What if I failed to use get() to obtain the data saved by @ohos.data.storage put()? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Symptom** + +After @ohos.data.storage **put()** is called to save data, **get()** is called to obtain the data. However, the data fails to be obtained. + +**Solution** + +The **put()** method provided by **@ohos.data.storage** saves data in the memory. When the application exits, the data in the memory will be cleared. If you want to persist the data, you need to call **flush()** or **flushSync()** after **put()**. 
After data is persisted, you can use **get()** to obtain the data after the application is restarted. + + +## What if a large text file fails to be saved in an RDB store? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Symptom** + +In API version 8, large text files cannot be saved in RDB stores. + +**Solution** + +In versions earlier than API version 9, the maximum length of a text file is 1024 bytes. If the text file exceeds 1024 bytes, it cannot be saved. + +The limit on the text file size has been removed since API9 version. diff --git a/en/application-dev/faqs/faqs-file-management.md b/en/application-dev/faqs/faqs-file-management.md index 6214cbc6b61aadc0e7501fbf3166d050b46500cb..85763b7eb00f9b8805786e80c208ea4059e8bb0e 100644 --- a/en/application-dev/faqs/faqs-file-management.md +++ b/en/application-dev/faqs/faqs-file-management.md @@ -1,107 +1,21 @@ # File Management Development -## Does fileio.rmdir Delete Files Recursively? +## How do I obtain the path of system screenshots? -Applicable to: OpenHarmony SDK 3.2.6.3, stage model of API version 9 +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) -Yes. **fileio.rmdir** deletes files recursively. +**Solution** -## How Do I Create a File That Does Not Exist? +The screenshots are stored in **/storage/media/100/local/files/Pictures/Screenshots/**. -Applicable to: OpenHarmony SDK 3.2.6.3, stage model of API version 9 +## How do I change the permissions on a directory to read/write on a device? -You can use **fileio.open(filePath, 0o100, 0o666)**. The second parameter **0o100** means to create a file if it does not exist. The third parameter **mode** must also be specified. +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) -## What If "call fail callback fail, code: 202, data: json arguments illegal" Is Displayed? +**Symptom** -Applicable to: OpenHarmony SDK 3.2.6.3, stage model of API version 9 +When the hdc command is used to send a file to a device, "permission denied" is displayed. 
-When the **fileio** module is used to copy files, the file path cannot start with "file:///". +**Solution** -## How Do I Read Files Outside the App Sandbox? - -Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 - -If the input parameter of the **fileio** API is **path**, only the sandbox directory of the current app obtained from the context can be accessed. To access data in other directories such as the user data, images, and videos, open the file as the data owner and operate with the file descriptor (FD) returned. - -For example, to read or write a file in Media Library, perform the following steps: - -1. Use **getFileAssets()** to obtain the **fileAsset** object. - -2. Use **fileAsset.open()** to obtain the FD. - -3. Use the obtained FD as the **fileIo** API parameter to read and write the file. - -## What If the File Contains Garbled Characters? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Read the file content from the buffer, and decode the file content using **util.TextDecoder**. - -Example: - -``` -import util from '@ohos.util' -async function readFile(path) { - let stream = fileio.createStreamSync(path, "r+"); - let readOut = await stream.read(new ArrayBuffer(4096)); - let textDecoder = new util.TextDecoder("utf-8", { ignoreBOM: true }); - let buffer = new Uint8Array(readOut.buffer) - let readString = textDecoder.decode(buffer, { stream: false }); - console.log ("[Demo] File content read: "+ readString); -} -``` - -## What Should I Do If There Is No Return Value or Error Captured After getAlbums Is Called? - -Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9 - -The **ohos.permission.READ_MEDIA** is required for using **getAlbums()**. In addition, this permission needs user authorization. For details, see [OpenHarmony Permission List](../security/permission-list.md). - -1. Configure the required permission in the **module.json5** file. 
- - ``` - "requestPermissions": [ - { - "name": "ohos.permission.READ_MEDIA" - } - ] - ``` - -2. Add the code for user authorization before the **MainAbility.ts -> onWindowStageCreate** page is loaded. - - ``` - import abilityAccessCtrl from '@ohos.abilityAccessCtrl.d.ts'; - - private requestPermissions() { - let permissionList: Array = [ - "ohos.permission.READ_MEDIA" - ]; - let atManager = abilityAccessCtrl.createAtManager(); - atManager.requestPermissionsFromUser(this.context, permissionList) - .then(data => { - console.info(`request permission data result = ${data.authResults}`) - }) - .catch(err => { - console.error(`fail to request permission error:${err}`) - }) - } - ``` - -## What Do I Do If the App Crashes When FetchFileResult() Is Called Multiple Times? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Each time after the **FetchFileResult** object is called, call **FetchFileResult.close()** to release and invalidate the **FetchFileResult** object . - -## What If An Error Is Reported by IDE When mediaLibrary.getMediaLibrary() Is Called in the Stage Model? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -In the stage model, use **mediaLibrary.getMediaLibrary(context: Context)** to obtain the media library instance. - -## How Do I Sort the Data Returned by mediaLibrary.getFileAssets()? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Use the **order** attribute in **[MediaFetchOptions](../reference/apis/js-apis-medialibrary.md#mediafetchoptions7)** to sort the data returned. +Run the **hdc shell mount -o remount,rw /** command to grant the read/write permissions. 
diff --git a/en/application-dev/faqs/faqs-globalization.md b/en/application-dev/faqs/faqs-globalization.md new file mode 100644 index 0000000000000000000000000000000000000000..b4d06ab98cbb1b24f4f0384ed893126c334ff383 --- /dev/null +++ b/en/application-dev/faqs/faqs-globalization.md @@ -0,0 +1,118 @@ +# Resource Manager Development + +## How do I read an XML file in rawfile and convert the data in it to the string type? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +Call **getRawFileContent** of the **ResourceManager** module to obtain the data in the XML file, and then use **String.fromCharCode** to convert the data to the string type. + +**Sample Code** + +``` +resourceManager.getRawFileContent('test.xml', (error, value) => { + if (error != null) { + console.log("error is " + error); + } else { + let rawFile = value; + let xml = String.fromCharCode.apply(null, rawFile) + } +}); +``` + +**Reference** + +[Resource Manager](../reference/apis/js-apis-resource-manager.md) + +## How do I obtain resources in the stage model? + +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +**Solution** + +The stage model allows an application to obtain a **ResourceManager** object based on **context** and call its resource management APIs without first importing the required bundle. This mode does not apply to the FA model. + +**Sample Code** + +``` +const context = getContext(this) as any +context + .resourceManager + .getString($r('app.string.entry_desc').id) + .then(value => { + this.message = value.toString() +}) +``` + +## How do I obtain the path of the resource directory by using an API? + +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +**Symptom** + +How do I obtain the path of the **resource** directory so that I can manage the files in it by using the file management API? 
+ +**Solution** + +Because the application is installed in HAP mode and the HAP package is not decompressed after the installation is complete, the resource path cannot be obtained when the program is running. + +To obtain the path of the **resource** directory, try either of the following ways: + +1. Use **\$r** or **\$rawfile** for access. This method applies to static access, during which the **resource** directory remains unchanged when the application is running. + +2. Use **ResourceManager** for access. This method applies to dynamic access, during which the **resource** directory dynamically changes when the application is running. + +**Reference** + +[Resource Categories and Access](../quick-start/resource-categories-and-access.md) and [Resource Manager](../reference/apis/js-apis-resource-manager.md) + +## Why does getPluralString return an incorrect value? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Symptom** + +The value obtained by the **getPluralString** is **other**, which is incorrect. + +**Solution** + +The **getPluralString** API is effective only when the system language is English. + +## How do I obtain the customized string fields in the resources directory? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +Use **getStringValue** of the **ResourceManager** module. + +**Reference** + +[Resource Manager](../reference/apis/js-apis-resource-manager.md#getstringvalue9) + +## How do I reference resources such as images and text in AppScope? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +Reference resources in the **\$r\('app.type.name'\)** format. Wherein, **type** indicates the resource type, such as color, string, and media, and **name** indicates the resource name. + +## How do I convert resources to strings? 
+ +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +For a qualifier directory, use **this.context.resourceManager.getStringSync\(\$r\('app.string.test'\).id\)** to convert resources to strings synchronously. Note that the **\$r\('app.string.test', 2\)** mode is not supported. + +**Reference** + +[Resource Manager](../reference/apis/js-apis-resource-manager.md#getstringsync9) + +## Can $ be used to reference constants in the form\_config.json file? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**\$** cannot be used to reference constants in the **form\_config.json** file. diff --git a/en/application-dev/faqs/faqs-hdc-std.md b/en/application-dev/faqs/faqs-hdc-std.md deleted file mode 100644 index 60f93da61d7d78a4e148b65c0e30d379b1e1206d..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-hdc-std.md +++ /dev/null @@ -1,87 +0,0 @@ -# hdc_std Command Usage - -## Common Log Commands - -Applicable to: OpenHarmony SDK 3.2.2.5 - -Clearing logs: hdc_std shell hilog -r - -Increasing the buffer size to 20 MB: hdc_std shell hilog -G 20M - -Capturing logs: hdc_std shell hilog > log.txt - -## What should I do to avoid log flow control? - -Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9 - -- Disabling log flow control: hdc_std shell hilog -Q pidoff - -- Disabling the privacy flag: hdc_std shell hilog -p off - -- Increasing the log buffer to 200 MB: hdc_std shell hilog -G 200M - -- Enabling the log function of the specific domain (that is, disabling the global log function): hdc_std shell hilog –b D –D 0xd0xxxxx - -After performing the preceding operations, restart the DevEco Studio. - -## What should I do if the HAP installed on the development board through the IDE cannot be opened? - -Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9 - -Check whether the SDK version is consistent with the system version on the development board. 
You are advised to use the SDK version and system version that are released on the same day. - -## How do I upload files using the hdc command? - -Applicable to: OpenHarmony SDK 3.2.2.5 - -Run the **hdc_std file send** command. - -## How do I prevent the screen of the RK3568 development board from turning off? - -Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9 - -Run the **hdc_std shell "power-shell setmode 602"** command. - -## How do I start an ability using the hdc command? - -Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9 - -Run the **hdc\_std shell aa start -a AbilityName -b bundleName -m moduleName** command. - -## How do I change the read and write permissions on a file directory on the development board? - -Applicable to: OpenHarmony SDK 3.2.5.6, stage model of API version 9 - -Run the **hdc\_std shell mount -o remount,rw /** command. - -## What should I do if the error message "Unknown file option -r" is displayed when hdc_std file recv is run? - -Applicable to: OpenHarmony SDK 3.2.5.6, stage model of API version 9 - -1. Use the the hdc tool in the device image or SDK of the same version. - -2. Remove any Chinese characters or spaces from the directory specified for the hdc tool. - -## How do I uninstall an application using the hdc command? - -Applicable to: OpenHarmony SDK 3.2.2.5 - -Run the **hdc\_std uninstall [-k] [package_name]** command. - -## How do I check whether the system is 32-bit or 64-bit? - -Applicable to: OpenHarmony SDK 3.2.5.5 - -Run the **hdc\_std shell getconf LONG_BIT** command. - -If **64** is returned, the system is a 64-bit one. Otherwise, the system is a 32-bit one. - -## How do I view the component tree structure? - -Applicable to: OpenHarmony SDK 3.2.5.5 - -1. Run the **hdc\_std shell** command to launch the CLI. - -2. Run the **aa dump -a** command to find **abilityID**. - -3. Run the **aa dump -i [abilityID] -c -render** command to view the component tree. 
diff --git a/en/application-dev/faqs/faqs-international.md b/en/application-dev/faqs/faqs-international.md deleted file mode 100644 index 546402921ce3a2cd9f9972721727a84d9a31295a..0000000000000000000000000000000000000000 --- a/en/application-dev/faqs/faqs-international.md +++ /dev/null @@ -1,19 +0,0 @@ -# Intl Development - -## How resources in AppScope, such as images and text, are referenced? - -Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 - -Resources are referenced in the **$r('app.type.name')** format. Where, **type** indicates the resource type, such as color, string, and media, and **name** indicates the resource name. - -## How do I convert the resource type to string? - -Applicable to: OpenHarmony SDK3.0, stage model of API version 9 - -If the resource type is set to **string**, the qualifier directory can be set as **this.context.resourceManager.getStringSync(\\$r('app.string.test').id)** and can be converted synchronously. The **\$r('app.string.test', 2)** mode is not supported. For more usage methods, see [Resource Manager](../reference/apis/js-apis-resource-manager.md#getstringsync9). - -## Why should I do if the constants referenced by $ in the form_config.json file does not take effect? - -Applicable to: OpenHarmony SDK 3.2.6.5, API9 Stage model - -In the **form\_config.json** file, **$** cannot be used to reference constants. diff --git a/en/application-dev/faqs/faqs-language.md b/en/application-dev/faqs/faqs-language.md index 22a450b4c8e37dc85a28c2ea3b972b03d6ea16ae..686283d7f8b41fa7abc4f4c78f74eed1240014d8 100644 --- a/en/application-dev/faqs/faqs-language.md +++ b/en/application-dev/faqs/faqs-language.md @@ -85,8 +85,6 @@ Applicable to: OpenHarmony SDK 3.2.3.5, stage model of API version 9 Objects imported to abilities and pages are packaged into two different closures, that is, two global objects. In this case, a static variable referenced by the abilities is not the same object as that referenced by the pages. 
Therefore, global variables cannot be defined by defining static variables in the class. You are advised to use AppStorage to manage global variables. -Reference: [State Management with Application-level Variables](../quick-start/arkts-state-mgmt-application-level.md) - ## How do I obtain resources in the stage model? Applicable to: OpenHarmony SDK 3.2.3.5, stage model of API version 9 @@ -181,7 +179,7 @@ Similar to **new Date().getTime()**, **systemTime.getCurrentTime(false)** return Applicable to: OpenHarmony SDK 3.2.6.5, stage model of API version 9 -If no parameter is passed when assigning a value to the **@BuilderParam** decorated attribute (for example, **content: this.specificParam**), define the type of the attribute as a function without a return value (for example, **@BuilderParam content: () => voi**). If any parameter is passed when assigning a value to the **@BuilderParam** decorated attribute (for example, **callContent: this.specificParam1("111")**), define the type of the attribute as **any** (for example, **@BuilderParam callContent: any**). For details, see [BuilderParam](../quick-start/arkts-dynamic-ui-elememt-building.md#builderparam8). +If no parameter is passed when assigning a value to the **@BuilderParam** decorated attribute (for example, **content: this.specificParam**), define the type of the attribute as a function without a return value (for example, **@BuilderParam content: () => voi**). If any parameter is passed when assigning a value to the **@BuilderParam** decorated attribute (for example, **callContent: this.specificParam1("111")**), define the type of the attribute as **any** (for example, **@BuilderParam callContent: any**). ## How does ArkTS convert a string into a byte array? @@ -251,8 +249,6 @@ Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 To listen for in-depth changes of **@State** decorated variables, you can use **@Observed** and **@ObjectLink** decorators. 
-Reference: [@Observed and @ObjectLink](../quick-start/arkts-state-mgmt-page-level.md#observed-and-objectlink) - ## How do I implement character string encoding and decoding? Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9 diff --git a/en/application-dev/faqs/faqs-network-management.md b/en/application-dev/faqs/faqs-network-management.md new file mode 100644 index 0000000000000000000000000000000000000000..dd585f87cc40f2942404cc51c3fcb15bcf04d55e --- /dev/null +++ b/en/application-dev/faqs/faqs-network-management.md @@ -0,0 +1,222 @@ +# Network Management Development + +## What are the data formats supported by extraData in an HTTP request? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Solution** + +**extraData** indicates additional data in an HTTP request. It varies depending on the HTTP request method. + +- If the HTTP request uses a POST or PUT method, **extraData** serves as the content of the HTTP request. +- If the HTTP request uses a GET, OPTIONS, DELETE, TRACE, or CONNECT method, **extraData** serves as a supplement to the HTTP request parameters and will be added to the URL when the request is sent. +- If you pass in a string object, **extraData** contains the string encoded on your own. + +## What does error code 28 mean in the response to an HTTP request? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +Error code 28 is reported after an HTTP request is initiated. + +**Solution** + +Error code 28 indicates **CURLE\_OPERATION\_TIMEDOUT**, which means a libcurl library operation timeout. For details, see any HTTP status code description available. + +**Reference** + +[Common HTTP Response Codes](../reference/apis/js-apis-http.md#responsecode) and [Curl Error Codes](https://curl.se/libcurl/c/libcurl-errors.html) + +## What does error code 6 mean in the response to an HTTP request? 
+ +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +Error code 6 is reported after an HTTP request is initiated. + +**Solution** + +Error code 6 indicates a failure to resolve the host in the address. You can ping the URL carried in the request to check whether the host is accessible. + +**Reference** + +[Common HTTP Response Codes](../reference/apis/js-apis-http.md#responsecode) and [Curl Error Codes](https://curl.se/libcurl/c/libcurl-errors.html) + +## How are parameters passed to queryParams of the POST request initiated by @ohos/axios? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +How are parameters passed to **queryParams** when the third-party component @ohos/axios initiates a POST request? + +**Solution** + +- Method 1: Have the **axios.post** API receive only one parameter. The **Url.URLSearchParams** parameter needs to be converted into a string and appended to the end of the URL. + + ``` + let params:Url.URLSearchParams = new Url.URLSearchParams() + params.append('ctl', 'sug') + params.append('query', 'wangjunkai') + params.append('cfrom', '1099a') + axios.post('http://10.100.195.234:3000/save?' + params.toString()).then(res => { + this.message = "request result: " + JSON.stringify(res.data); + }).catch(err => { + this.message = "request error: " + err.message; + }) + ``` + +- Method 2: Have the **axios** API receive only one **config** object. The request parameters are written in **params** of the **config** object. + + ``` + axios({ + url: 'http://10.100.195.234:3000/save', + method: 'post', + params: { + ctl: 'sug', + query: 'wangjunkai', + cfrom: '1099a' + } + }).then(res => { + this.message = "request result: " + JSON.stringify(res.data); + }).catch(err => { + this.message = "request error: " + err.message; + }) + ``` + + +## What should I do if no data is returned after connection.getNetCapabilities\(mNetHandle\) is called? 
+ +Applicable to: OpenHarmony 3.2 Beta 2 (API version 9) + +**Symptom** + +No data is returned after **connection.getNetCapabilities\(\)** is called. What should I do? + +**Possible Cause** + +This problem is due to incorrect pointing of the **this** object. You are expected to use **\(err,data\)=\>\{\}** instead of **function\(err,data\)** to access the callback function to obtain the return result. The reason is that the function declared by **function** has its own **this** object and therefore cannot point to the **globalThis** object. + +**Solution** + +Change **function\(err,data\)** to **\(err,data\)** for the second parameter of **getNetCapabilities**. + +## How is data in HTTP requests transmitted in JSON format? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Solution** + +In the HTTP message header, **Content-Type** is used to indicate the media type information. It tells the server how to process the requested data and tells the client (usually a browser) how to parse the response data, for example, displaying an image, parsing HTML, or displaying only the text. + +To transmit data in HTTP requests in JSON format, set **Content-Type** to **application/json**. + +``` +this.options = { + method: http.RequestMethod.GET, + extraData: this.extraData, + header: { 'Content-Type': 'application/json' }, + readTimeout: 50000, + connectTimeout: 50000 +} +``` + +## How do I upload photos taken by a camera to the server? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Symptom** + +After an application calls the camera to take a photo, how do I upload the photo to the server? + +**Solution** + +After the application is started and the permission is obtained, have the system access the remote server and transfer the locally saved photos to the remote server through the upload API. 
+ +**Reference** + +[Upload and Download](../reference/apis/js-apis-request.md) + +## What should I do if calling connection.hasDefaultNet\(\) fails even when the network is normal? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +The network connection is normal, and web pages can be opened properly on the browser. However, calling the **hasDefaultNet** fails, and the callback function returns an error. + +**Solution** + +Declare the **ohos.permission.GET\_NETWORK\_INFO** permission when calling **connection.hasDefaultNet**. + +For details, see [Applying for Permissions](../security/accesstoken-guidelines.md). + +## What does netId mean in the netHandle object returned by connection.getDefaultNet? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +What are the meanings of the values of **netId**, such as **0** and **100**? + +**Solution** + +If the value of **netId** is **0**, no network connection is available. In such a case, check and rectify network faults. If the value is greater than or equal to **100**, the network connection is normal. + +## How do I use HTTP requests to obtain data from the network? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Solution** + +Use the **@ohos.net.http** module to initiate an HTTP request. + +1. Import the **http** module and create an HTTP request. +2. Set the request URL and parameters and initiate the HTTP request. +3. Obtain the response and parse the data. + +**Reference** + +[HTTP Data Request](../connectivity/http-request.md) + +## How do I encapsulate network requests by using JavaScript? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Solution** + +OpenHarmony supports the JavaScript development mode. You can directly use JavaScript to encapsulate network requests. For details, see [Network Connection](../reference/apis/js-apis-http.md). + +## How do I write network requests when developing a JavaScript-based application for smart watches? 
+ +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Solution** + +OpenHarmony supports the JavaScript development mode. You can directly use JavaScript to encapsulate network requests. For details, see [Network Connection](../reference/apis/js-apis-http.md). + +## Why does an application fail to start after the ohos.permission.NOTIFICATION\_CONTROLLER permission is declared? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +When an application is started, the following error message is reported w: error: install failed due to grant request permissions failed. + +**Solution** + +The **ohos.permission.NOTIFICATION\_CONTROLLER** permission is a **system core** permission and is not available for third-party applications. + +## What should I do if an error is reported when wifi.getIpInfo\(\).ipAddress is used in the Wi-Fi module? + +Applicable to: OpenHarmony 3.2 Beta (API version 9) + +**Symptom** + +When **wifi.getIpInfo\(\).ipAddress** is used in the Wi-Fi module, the following error message is reported: Error: assertion \(wifiDevicePtr != nullptr\) failed: Wifi device instance is null. + +**Solution** + +This problem is due to insufficient permissions. Check whether you have applied for the required permissions. For details, see [Permission Management](../security/accesstoken-overview.md). diff --git a/en/application-dev/faqs/faqs-security.md b/en/application-dev/faqs/faqs-security.md new file mode 100644 index 0000000000000000000000000000000000000000..691739a0c288a3315e27e1f54700ba4e2637ddb3 --- /dev/null +++ b/en/application-dev/faqs/faqs-security.md @@ -0,0 +1,41 @@ +# Basic Security Capability Development + +## What is the maximum number of bytes that can be encrypted at a time in AES GCM mode in HUKS? + +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +**Solution** + +In HUKS, a maximum of 64 bytes can be encrypted at a time in AES GCM mode. + +**Example** + +``` +/* Encrypt the key. 
*/ +await huks.init(srcKeyAlias, encryptOptions).then((data) => { + console.info(`test init data: ${JSON.stringify(data)}`); + handle = data.handle; +}).catch((err) => { + console.info('test init err information: ' + JSON.stringify(err)); +}); +encryptOptions.inData = aesCipherStringToUint8Array(cipherInData.slice (0,64)); // Take 64 bytes. +await huks.update(handle, encryptOptions).then(async (data) => { + console.info(`test update data ${JSON.stringify(data)}`); + encryptUpdateResult = Array.from(data.outData); +}).catch((err) => { + console.info('test update err information: ' + err); +}); +encryptOptions.inData = aesCipherStringToUint8Array(cipherInData.slice (64,80)); // Remaining data +``` + +## What if garbled characters are returned by **digest()** of **Md** in Crypto framework? + +Applicable to: OpenHarmony 3.1 Beta 5 (API version 9) + +**Symptom** + +In the CryptoFramework, garbled characters are returned by **digest()** of **Md**. + +**Solution** + +The DataBlob returned by **digest()** is of the Uint8Array type and needs to be converted into a hexadecimal string before being printed. You can use the online MD5 encryption tool to verify the result. diff --git a/en/application-dev/faqs/faqs-startup.md b/en/application-dev/faqs/faqs-startup.md new file mode 100644 index 0000000000000000000000000000000000000000..3d0d9d2cf8b59cb7f1b3b3c778b963c509958ced --- /dev/null +++ b/en/application-dev/faqs/faqs-startup.md @@ -0,0 +1,43 @@ +# Startup Development + +## How do I obtain the system version of a device? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +You can obtain the system version of a device through the **osFullName** attribute of the [deviceInfo](../reference/apis/js-apis-device-info.md) object. + +**Sample Code** + +``` +import deviceInfo from '@ohos.deviceInfo' +let v = deviceInfo.osFullName +``` + +## How do I obtain the UDID of a device? 
+ +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +- Method 1: Run the **hdc shell bm get --udid** command. +- Method 2: Obtain the value from the code. For details, see [udid](../reference/apis/js-apis-device-info.md). + +## How do I obtain device information? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +You can call **deviceInfo** to obtain device information, such as the device model. + +**Reference** + +[Device Information](../reference/apis/js-apis-device-info.md) + +## How do I prevent application development from being interrupted by screen saving? + +Applicable to: OpenHarmony 3.2 Beta 5 (API version 9) + +**Solution** + +Run the **hdc shell "power-shell setmode 602"** command to turn off screen saving. diff --git a/en/application-dev/faqs/faqs-web-arkts.md b/en/application-dev/faqs/faqs-web-arkts.md index be2d58f82d54c9b95596ad3e767954fb7acfceca..6fe2c75a4bf0bc9b1d2f73929a34dc618c503d5b 100644 --- a/en/application-dev/faqs/faqs-web-arkts.md +++ b/en/application-dev/faqs/faqs-web-arkts.md @@ -76,4 +76,4 @@ Applicable to: OpenHarmony SDK 3.2.7.5, stage model of API version 9 4. Use message port 0 on the application side to send messages to message port 1 on the HTML side. 
-Reference: [Web](../reference/arkui-ts/ts-basic-components-web.md#postmessage9) +Reference: [Web](../reference/arkui-ts/ts-basic-components-web.md) diff --git a/en/application-dev/file-management/Readme-EN.md b/en/application-dev/file-management/Readme-EN.md index 4aa7c9af2c977aac2a987de80bad2b6f164b1215..bdc1bc7c1b00195bc24c71396f4fed78b93de15a 100644 --- a/en/application-dev/file-management/Readme-EN.md +++ b/en/application-dev/file-management/Readme-EN.md @@ -1,10 +1,23 @@ # File Management -- MediaLibrary Management - - [MediaLibrary Overview](medialibrary-overview.md) - - [Media Asset Management](medialibrary-resource-guidelines.md) - - [File Path Management](medialibrary-filepath-guidelines.md) - - [Album Management](medialibrary-album-guidelines.md) -- File Access Framework - - [File Access Framework Overview](file-access-framework-overview.md) -- [FilePicker Guide](filepicker-guidelines.md) \ No newline at end of file +- [File Management Overview](file-management-overview.md) +- Application File + - [Application File Overview](app-file-overview.md) + - [Application Sandbox Directory](app-sandbox-directory.md) + - Application File Access and Management + - [Accessing Application Files](app-file-access.md) + - [Uploading and Downloading Application Files](app-file-upload-download.md) + - [Obtaining Application and File System Space Statistics](app-fs-space-statistics.md) + - [Sending Files to an Application Sandbox](send-file-to-app-sandbox.md) + - [Sharing an Application File](share-app-file.md) +- User File + - [User File Overview](user-file-overview.md) + - Selecting and Saving User Files (FilePicker) + - [Selecting User Files](select-user-file.md) + - [Saving User Files](save-user-file.md) + - [Developing a FileManager Application (Available Only for System Applications)](dev-user-file-manager.md) + - [Managing External Storage Devices (Available Only for System Applications)](manage-external-storage.md) +- Distributed File System + - [Distributed File 
System Overview](distributed-fs-overview.md) + - [Setting the Security Level of a Distributed File](set-security-label.md) + - [Accessing Files Across Devices](file-access-across-devices.md) diff --git a/en/application-dev/file-management/app-file-access.md b/en/application-dev/file-management/app-file-access.md new file mode 100644 index 0000000000000000000000000000000000000000..a98e1ec1d8e82af603621f8cb41bad58ecc79d93 --- /dev/null +++ b/en/application-dev/file-management/app-file-access.md @@ -0,0 +1,172 @@ +# Accessing Application Files + +This topic describes how to view, create, read, write, delete, move, or copy a file in the application file directory and obtain the file information. + +## Available APIs + +You can use [ohos.file.fs](../reference/apis/js-apis-file-fs.md) to implement the application file access capabilities. The following table describes the APIs. + +**Table 1** APIs for basic application file operations + +| API| Description| Type| Synchronous Programming| Asynchronous Programming| +| -------- | -------- | -------- | -------- | -------- | +| access | Checks whether a file exists.| Method| √ | √ | +| close | Closes a file.| Method| √ | √ | +| copyFile | Copies a file.| Method| √ | √ | +| createStream | Creates a stream based on the specified file path.| Method| √ | √ | +| listFile | Lists all files in a directory.| Method| √ | √ | +| mkdir | Creates a directory.| Method| √ | √ | +| moveFile | Moves a file.| Method| √ | √ | +| open | Opens a file.| Method| √ | √ | +| read | Reads data from a file.| Method| √ | √ | +| rename | Renames a file or folder.| Method| √ | √ | +| rmdir | Deletes a directory.| Method| √ | √ | +| stat | Obtains detailed file information.| Method| √ | √ | +| unlink | Deletes a single file.| Method| √ | √ | +| write | Writes data to a file.| Method| √ | √ | +| Stream.close | Closes a stream.| Method| √ | √ | +| Stream.flush | Flushes all data from this stream.| Method| √ | √ | +| Stream.write | Writes data to a stream.| 
Method| √ | √ | +| Stream.read | Reads data from a stream.| Method| √ | √ | +| File.fd | Defines a file descriptor.| Attribute| √ | × | +| OpenMode | Defines the mode for opening a file.| Attribute| √ | × | +| Filter | Defines the options for setting the file filter.| Type| × | × | + +## Development Example + +Obtain the [application file path](../application-models/application-context-stage.md#obtaining-the-application-development-path). The following example shows how to obtain a HAP file path using **UIAbilityContext**. For details about how to obtain **UIAbilityContext**, see [Obtaining the Context of UIAbility](../application-models/uiability-usage.md#obtaining-the-context-of-uiability). + +The following describes common file operations. + +### Creating, Reading, and Writing a File + +The following example demonstrates how to create a file, read data from it, and write data to it. + +```ts +// pages/xxx.ets +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +function createFile() { + // Obtain the application file path. + let context = getContext(this) as common.UIAbilityContext; + let filesDir = context.filesDir; + + // Create a file and open it. + let file = fs.openSync(filesDir + '/test.txt', fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); + // Write data to the file. + let writeLen = fs.writeSync(file.fd, "Try to write str."); + console.info("The length of str is: " + writeLen); + // Read data from the file. + let buf = new ArrayBuffer(1024); + let readLen = fs.readSync(file.fd, buf, { offset: 0 }); + console.info("the content of file: " + String.fromCharCode.apply(null, new Uint8Array(buf.slice(0, readLen)))); + // Close the file. + fs.closeSync(file); +} +``` + +### Copying Data to Another File + + The following example demonstrates how to write the data read from a file to another file. 
+ +```ts +// pages/xxx.ets +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +function readWriteFile() { + // Obtain the application file path. + let context = getContext(this) as common.UIAbilityContext; + let filesDir = context.filesDir; + + // Open the source and destination files. + let srcFile = fs.openSync(filesDir + '/test.txt', fs.OpenMode.READ_WRITE); + let destFile = fs.openSync(filesDir + '/destFile.txt', fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); + // Read data from the source file and copy it to the destination file. + let bufSize = 4096; + let readSize = 0; + let buf = new ArrayBuffer(bufSize); + let readLen = fs.readSync(srcFile.fd, buf, { offset: readSize }); + while (readLen > 0) { + readSize += readLen; + fs.writeSync(destFile.fd, buf); + readLen = fs.readSync(srcFile.fd, buf, { offset: readSize }); + } + // Close the files. + fs.closeSync(srcFile); + fs.closeSync(destFile); +} +``` + +> **NOTE** +> +> When using **read()** or **write()**, pay attention to the optional parameter **offset**. For a file that has been read or written, the offset pointer is at the end position of the last read or write operation by default. + +### Reading and Writing Files in a Stream + +The following example demonstrates how to read and write file data using a stream. + +```ts +// pages/xxx.ets +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +async function readWriteFileWithStream() { + // Obtain the application file path. + let context = getContext(this) as common.UIAbilityContext; + let filesDir = context.filesDir; + + // Open the file streams. + let inputStream = fs.createStreamSync(filesDir + '/test.txt', 'r+'); + let outputStream = fs.createStreamSync(filesDir + '/destFile.txt', "w+"); + // Read data from the source file and write the data to the destination file using a stream. 
+ let bufSize = 4096; + let readSize = 0; + let buf = new ArrayBuffer(bufSize); + let readLen = await inputStream.read(buf, { offset: readSize }); + readSize += readLen; + while (readLen > 0) { + await outputStream.write(buf); + readLen = await inputStream.read(buf, { offset: readSize }); + readSize += readLen; + } + // Close the streams. + inputStream.closeSync(); + outputStream.closeSync(); +} +``` + +> **NOTE** +> +> Close the stream that is no longer used in a timely manner.
Comply with the related programming specifications for **Stream** APIs in asynchronous mode and avoid mixed use of the APIs in synchronous mode and asynchronous mode.
The **Stream** APIs do not support concurrent read and write operations. + +### Listing Files + +The following example demonstrates how to list files. + +```ts +// List files. +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +// Obtain the application file path. +let context = getContext(this) as common.UIAbilityContext; +let filesDir = context.filesDir; + +// List files that meet the specified conditions. +let options = { + recursion: false, + listNum: 0, + filter: { + suffix: ['.png', '.jpg', '.txt'], // The filename extension can be '.png', '.jpg', or '.txt'. + displayName: ['test%'], // The filename starts with 'test'. + fileSizeOver: 0, // The file size is greater than or equal to 0. + lastModifiedAfter: new Date(0).getTime(), // The latest modification time of the file is later than January 1, 1970. + }, +} +let files = fs.listFileSync(filesDir, options); +for (let i = 0; i < files.length; i++) { + console.info(`The name of file: ${files[i]}`); +} +``` diff --git a/en/application-dev/file-management/app-file-overview.md b/en/application-dev/file-management/app-file-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..f3d49fd0c640ca1602575a917c37fc7f97818842 --- /dev/null +++ b/en/application-dev/file-management/app-file-overview.md @@ -0,0 +1,11 @@ +# Application File Overview + +Application files are files of an application, including the application's installation files, resource files, and cache files. + +- The data used and saved by an application is stored in files, key-value (KV) pairs, and databases in a dedicated directory on a device. This directory is called application file directory, and the files in the directory are application files. + +- The directories visible to an application include the application file directory and a directory containing the minimum system files required for the running of the application. 
These two directories constitute an [application sandbox directory](app-sandbox-directory.md). That means, the application file directory is a subset of the application sandbox directory. + +- The system files and directories are read-only for the application. The application can only save files to sub-directories in the [application file directory](app-sandbox-directory.md#application-file-directory-and-application-file-path) based on certain rules. + +The following topics describe the application sandbox, application file directories, and how to access, manage, and share application files. diff --git a/en/application-dev/file-management/app-file-upload-download.md b/en/application-dev/file-management/app-file-upload-download.md new file mode 100644 index 0000000000000000000000000000000000000000..e95ba1dc8794404b4b79162356434017b57d08e4 --- /dev/null +++ b/en/application-dev/file-management/app-file-upload-download.md @@ -0,0 +1,105 @@ +# Uploading and Downloading an Application File + +This topic describes how to upload an application file to a network server and download a network resource file from a network server to a local application directory. + +## Uploading an Application File + +You can use [ohos.request](../reference/apis/js-apis-request.md) **uploadFile()** to upload local files. The system service proxy implements the upload. + +> **NOTE** +> +> Currently, only the files in the **cache/** directories (**cacheDir**) can be uploaded. +> +>
The **ohos.permission.INTERNET** permission is required for using **ohos.request**. For details about how to apply for a permission, see [Applying for Permissions](../security/accesstoken-guidelines.md). + +The following example demonstrates how to upload a file in the **cache** directory of an application to a network server. + +```ts +// pages/xxx.ets +import common from '@ohos.app.ability.common'; +import fs from '@ohos.file.fs'; +import request from '@ohos.request'; + +// Obtain the application file path. +let context = getContext(this) as common.UIAbilityContext; +let cacheDir = context.cacheDir; + +// Create an application file locally. +let file = fs.openSync(cacheDir + '/test.txt', fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); +fs.writeSync(file.fd, 'upload file test'); +fs.closeSync(file); + +// Configure the upload task. +let uploadConfig = { + url: 'https://xxx', + header: { key1: 'value1', key2: 'value2' }, + method: 'POST', + files: [ + { filename: 'test.txt', name: 'test', uri: 'internal://cache/test.txt', type: 'txt' } + ], + data: [ + { name: 'name', value: 'value' } + ] +} + +// Upload the created application file to the network server. +try { + request.uploadFile(context, uploadConfig) + .then((uploadTask) => { + uploadTask.on('complete', (taskStates) => { + for (let i = 0; i < taskStates.length; i++) { + console.info(`upload complete taskState: ${JSON.stringify(taskStates[i])}`); + } + }); + }) + .catch((err) => { + console.error(`Invoke uploadFile failed, code is ${err.code}, message is ${err.message}`); + }) +} catch (err) { + console.error(`Invoke uploadFile failed, code is ${err.code}, message is ${err.message}`); +} +``` + +## Downloading a Network Resource File to the Application File Directory + +You can use [ohos.request](../reference/apis/js-apis-request.md) **downloadFile()** to download network resource files to a local application directory. 
You can use the [ohos.file.fs](../reference/apis/js-apis-file-fs.md) APIs to access the downloaded files. For details, see [Accessing Application Files](app-file-access.md). The system service proxy implements the download. + +> **NOTE** + > + > Currently, network resource files can be downloaded only to an application file directory. + +>
The **ohos.permission.INTERNET** permission is required for using **ohos.request**. For details about how to apply for a permission, see [Applying for Permissions](../security/accesstoken-guidelines.md). + +The following example demonstrates how to download a network resource file to a local application file directory. + +```ts +// pages/xxx.ets +// Download the network resource file to the local application file directory, and read data from the file. +import common from '@ohos.app.ability.common'; +import fs from '@ohos.file.fs'; +import request from '@ohos.request'; + +// Obtain the application file path. +let context = getContext(this) as common.UIAbilityContext; +let filesDir = context.filesDir; + +try { + request.downloadFile(context, { + url: 'https://xxxx/xxxx.txt', + filePath: filesDir + '/xxxx.txt' + }).then((downloadTask) => { + downloadTask.on('complete', () => { + console.info('download complete'); + let file = fs.openSync(filesDir + '/xxxx.txt', fs.OpenMode.READ_WRITE); + let buf = new ArrayBuffer(1024); + let readLen = fs.readSync(file.fd, buf); + console.info(`The content of file: ${String.fromCharCode.apply(null, new Uint8Array(buf.slice(0, readLen)))}`); + fs.closeSync(file); + }) + }).catch((err) => { + console.error(`Invoke downloadTask failed, code is ${err.code}, message is ${err.message}`); + }); +} catch (err) { + console.error(`Invoke downloadFile failed, code is ${err.code}, message is ${err.message}`); +} +``` diff --git a/en/application-dev/file-management/app-fs-space-statistics.md b/en/application-dev/file-management/app-fs-space-statistics.md new file mode 100644 index 0000000000000000000000000000000000000000..6854af8c857173db01bcaf730722630c4bffba1b --- /dev/null +++ b/en/application-dev/file-management/app-fs-space-statistics.md @@ -0,0 +1,54 @@ +# Obtaining Application and File System Space Statistics + +This topic describes how to obtain information about the free system space and space occupied by applications so as to prevent 
insufficient storage space of the system or ensure proper use of quota-controlled **cacheDir** directories. + +## Available APIs + +For details about the APIs, see [ohos.file.statvfs](../reference/apis/js-apis-file-statvfs.md) and [ohos.file.storageStatistics](../reference/apis/js-apis-file-storage-statistics.md). + +**Table 1** APIs for file system and application space statistics + +| Module| API| Description| +| -------- | -------- | -------- | +| \@ohos.file.storageStatistic | getCurrentBundleStats | Obtains the storage space of the current application, in bytes.| +| \@ohos.file.statvfs | getFreeSize | Obtains the free space of a file system, in bytes.| +| \@ohos.file.statvfs | getTotalSize | Obtains the total space of a file system, in bytes.| + +**Table 2** Attributes for application space statistics + +| BundleStats Attribute| Description| Directory for Statistics| +| -------- | -------- | -------- | +| appSize | Size of the application installation files, in bytes.| Application installation file directory:
**/data/storage/el1/bundle**| + | cacheSize | Size of the application cache files, in bytes.| Application cache file directories:
**/data/storage/el1/base/cache**
**/data/storage/el1/base/haps/entry/cache**
**/data/storage/el2/base/cache**
**/data/storage/el2/base/haps/entry/cache**| + | dataSize | Size of the application files (excluding the application installation files and cache files), in bytes.| The application files include local files, distributed files, and database files.
- Local application file directories (parent directories of the **cache** directories):
**/data/storage/el1/base**
**/data/storage/el2/base**
- Distributed application directory:
**/data/storage/el2/distributedfiles**
- Database directories:
**/data/storage/el1/database**
**/data/storage/el2/database **| + +## Development Example + +- Obtain the free space of **/data** of the file system. + + ```ts + import statvfs from '@ohos.file.statvfs'; + + let path = "/data"; + statvfs.getFreeSize(path, (err, number) => { + if (err) { + console.error(`Invoke getFreeSize failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info(`Invoke getFreeSize succeeded, size is ${number}`); + } + }); + ``` + +- Obtain the space occupied by the current application. + + ```ts + import storageStatistics from "@ohos.file.storageStatistics"; + + storageStatistics.getCurrentBundleStats((err, bundleStats) => { + if (err) { + console.error(`Invoke getCurrentBundleStats failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info(`Invoke getCurrentBundleStats succeeded, appsize is ${bundleStats.appSize}`); + } + }); + ``` diff --git a/en/application-dev/file-management/app-sandbox-directory.md b/en/application-dev/file-management/app-sandbox-directory.md new file mode 100644 index 0000000000000000000000000000000000000000..3424dfbc9ec457e5ab6839e521fde5caad0dd3ae --- /dev/null +++ b/en/application-dev/file-management/app-sandbox-directory.md @@ -0,0 +1,94 @@ +# Application Sandbox Directory + +The application sandbox is an isolation mechanism to prevent data from being accessed through path traversal. This mechanism allows only the application sandbox directory visible to an application. + +- The system maps a dedicated application sandbox directory in the internal storage space for each application. The directory is a collection of the [application file directory](app-file-overview.md) and a directory containing the minimum system files required during application's runtime. + +- The application sandbox specifies the minimum range of data visible to each application. In the application sandbox directory, an application can access only its own application files and the system files required for its running. 
The application cannot access files of other applications. The security of application files is protected in this way. + +- In each application sandbox directory, the application can save and process its own application files in the [application file directory](app-file-overview.md), and can only read the system files and directories. The application can access [user files](user-file-overview.md) by using specific APIs only with authorization from the user. + +The following figure illustrates the file access scope and modes for an application in an application sandbox. + +**Figure 1** File access in an application sandbox + +![Application sandbox file access relationship](figures/application-sandbox-file-access-relationship.png) + +## Application Sandbox Directory and Application Sandbox Path + +With the application sandbox mechanism, an application cannot learn the location and existence of other applications' directories and user file directories. In addition, all the application directories visible to an application are isolated by permission and namespace to form an independent directory view and shield the real (physical) paths. + +- As shown in the following figure, the sandbox mechanism minimizes the number of directories and files visible to a common application (third-party application). The directories and file paths visible to a common application are different from those visible to a system process. The path of a file or folder in the application sandbox directory visible to a common application is called the application sandbox path. + +- You can view the real application paths (the directory view visible to a system process) in the HDC shell environment. For details about the mappings between the application sandbox paths and real application paths, see [Mappings Between Application Sandbox Paths and Physical Paths](send-file-to-app-sandbox.md#mappings-between-application-sandbox-paths-and-physical-paths). 
+ +- The application sandbox paths and physical paths are not in one-to-one mappings. There are always fewer application sandbox paths than physical paths. You may not obtain the application sandbox path based on a physical path in certain cases, but you can obtain the physical path based on an application sandbox path. + +**Figure 2** Different directory views to processes and applications + +![Application sandbox path](figures/application-sandbox-path.png) + +## Application File Directory and Application File Path + +The application sandbox directory includes application file directories and system file directories. + +The system file directories visible to an application are preset by OpenHarmony. + +The following figure shows the application file directories. The path of a file or a folder in the application file directory is called the application file path. The file paths in the application file directory have different attributes and characteristics. + +**Figure 3** Application file directory structure + +![Application file directory structure](figures/application-file-directory-structure.png) + +1. Level 1 directory **data/**: indicates the application file directory. + +2. Level 2 directory **storage/**: indicates a directory for persistent files of the application. + +3. Level 3 directories **el1/** and **el2/**: indicate directories for files of different encryption levels (els). + - **el1**: directory for the data that can be accessed once the device starts. This directory contains device-focused files. + - **el2**: directory for the data that can be accessed only after at least one successful unlocking operation (by PIN, fingerprint, or facial authentication, or password-free sign-in) upon the start of the device. This directory contains user-focused files.
+ Unless otherwise required, application data is placed in the **el2** directory for security purposes. However, the data that needs to be accessed before the screen is unlocked (such as the clock, alarm, and wallpaper data) can be placed in the **el1** directory. For details about how to switch to and modify the **el** directories, see [Obtaining and Modifying el Directories](../application-models/application-context-stage.md#obtaining-and-modifying-encryption-areas). + +4. Level 4 and level 5 directories: + The application's global data is stored in the **files**, **cache**, **preferences**, **temp**, and **distributedfiles** folders in **base**. You can use **ApplicationContext** to obtain the application file paths of these folders. + + You can use **UIAbilityContext**, **AbilityStageContext**, and **ExtensionContext** to obtain application file paths related to the OpenHarmony Ability Package (HAP). When the HAP is uninstalled, the files stored in these directories are automatically deleted, without affecting the files in app-level directories. An application in the development state contains one or more HAPs. For details, see [Application Package Structure in Stage Mode](../quick-start/application-package-structure-stage.md). + + For details about how to obtain the context and application file paths, see [Context (Stage Model)](../application-models/application-context-stage.md). + + > **NOTE** + > + > - Do not directly use file paths made up by level 1 to level 4 directory names. Incompatibility problems may occur if the directory names are changed in later versions. + > - Use **Context** to obtain application file paths, which include but are not limited to the directories highlighted in green in **Figure 3**. + + The following table describes the application file paths and lifecycle. 
+ + **Table 1** Application file paths + + | Folder| Context Attribute Name| Type| Description| + | -------- | -------- | -------- | -------- | + | bundle | bundleCodeDir | Installation file directory| Directory for saving the HAPs of the application after an application is installed.
This directory is cleared when the application is uninstalled.
Do not access resource files by concatenating paths. Use **@ohos.resourceManager** instead.| + | base | NA | Directory for local device files| Directory for saving the application's persistent data on the device. Subdirectories include **files/**, **cache/**, **temp/**, and **haps/**.
This directory is cleared when the application is uninstalled.| + | database | databaseDir | Database directory| Directory in **el1** for saving the files operated by the distributed database service.
This directory is cleared when the application is uninstalled.| + | distributedfiles | distributedFilesDir | Distributed file directory| Directory in **el2** for saving the application files that can be directly accessed across devices.
This directory is cleared when the application is uninstalled.| + | files | filesDir | Application file directory| Directory for saving the application's persistent files on the device.
This directory is cleared when the application is uninstalled.| + | cache | cacheDir | Application cache file directory| Directory for caching the downloaded files of the application or saving the cache files regenerated on the device.
This directory is automatically cleared when the size of the **cache** directory reaches the quota or the system storage space reaches a certain threshold. The user can also clear this directory by using a system space management application.
The application needs to check whether the file still exists and determine whether to cache the file again.| + | preferences | preferencesDir | Preferences file directory| Directory for saving common application configuration and user preference data managed by using database APIs.
This directory is cleared when the application is uninstalled. For details, see [Persisting Preferences Data](../database/data-persistence-by-preferences.md).| + | temp | tempDir | Temporary file directory| Directory for saving the files generated and required during the application's runtime on the device.
This directory is cleared when the application exits.| + + The application file paths are used in the following scenarios: + + - Installation file directory
+ Used to store the code resource data of the application, including the HAPs of the application, reusable library files, and plug-ins. The code stored in this directory can be dynamically loaded. + - Database directory
+ Used to store only the application's private database data, such as database files. This directory can be used to store distributed database data only. + - Distributed file directory
+ Used to store the application's data used for distributed scenarios, including file sharing, file backup, and file processing across devices. The data stored in this directory enables the application to run smoothly on multiple devices. + - Application file directory
+ Used to store private data of the application, including persistent files, images, media files, and log files. The data is stored in this directory to ensure privacy, security, and permanent validity. + - Cached application file directory
+ Used to store cached data of the application, including offline data, cached images, database backup, and temporary files. Data stored in this directory may be automatically deleted by the system. Therefore, do not store important data in this directory. + - Preferences file directory
+ Used to store application preferences data, including preference files and configuration files. This directory applies to storing only a small amount of data. - Temporary file directory
+ Used to store temporarily generated data of an application, including cached database data and images, temporary log files, downloaded application installation package files. The data stored in this directory is deleted after being used. diff --git a/en/application-dev/file-management/dev-user-file-manager.md b/en/application-dev/file-management/dev-user-file-manager.md new file mode 100644 index 0000000000000000000000000000000000000000..e048ad7fefa3cf5ddf26ad3764403231cc3045dd --- /dev/null +++ b/en/application-dev/file-management/dev-user-file-manager.md @@ -0,0 +1,145 @@ +# Developing a FileManager Application (Available Only for System Applications) + +OpenHarmony is prebuilt with the **FileManager** application. You can also develop your own **FileManager** as required. + +## Available APIs + +For details about the APIs, see [User File Access and Management](../reference/apis/js-apis-fileAccess.md). + +## How to Develop + +1. Configure the permissions required and import dependent modules. + Apply for the **ohos.permission.FILE_ACCESS_MANAGER** and **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permissions. For details, see [Applying for Permissions](../security/accesstoken-guidelines.md). + + > **NOTE** + > + > **ohos.permission.FILE_ACCESS_MANAGER** is required for using the user file access framework APIs. + > + > **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** is required for querying information about file management server applications supported by the system. + +2. Import the dependent modules. + + ```ts + import fileAccess from '@ohos.file.fileAccess'; + import fileExtensionInfo from '@ohos.file.fileExtensionInfo'; + ``` + + The **fileAccess** module provides APIs for basic file operations, and the **fileExtensionInfo** module provides key structs for application development. + +3. Query device information. + You can obtain attributes of one or all devices managed by the file management server in the current system. 
You can also filter devices as required. + + In the user file access framework, **RootInfo** indicates the attribute information of a device. For example, obtain **RootInfo** of all devices. + + ```ts + // Create a helper object for connecting to all file management servers in the system. + let fileAccessHelperAllServer = null; + createFileAccessHelper() { + try { // this.context is the context passed from EntryAbility. + fileAccessHelperAllServer = fileAccess.createFileAccessHelper(this.context); + if (!fileAccessHelperAllServer) { + console.error("createFileAccessHelper interface returns an undefined object"); + } + } catch (error) { + console.error("createFileAccessHelper failed, errCode:" + error.code + ", errMessage:" + error.message); + } + } + async getRoots() { + let rootIterator = null; + let rootInfos = []; + let isDone = false; + try { + rootIterator = await fileAccessHelperAllServer.getRoots(); + if (!rootIterator) { + console.error("getRoots interface returns an undefined object"); + return; + } + while (!isDone) { + let result = rootIterator.next(); + console.info("next result = " + JSON.stringify(result)); + isDone = result.done; + if (!isDone) + rootInfos.push(result.value); + } + } catch (error) { + console.error("getRoots failed, errCode:" + error.code + ", errMessage:" + error.message); + } + } + ``` + +4. View directories. + In the user file access framework, **FileInfo** indicates basic information about a file (directory). You can use **listfile()** to traverse all files (directories) of the next level to obtain a **FileIterator** object or use **scanfile()** to filter the specified directories and obtain the **FileIterator** object that meets the conditions. + + Currently, **listfile()** and **scanfile()** can be called by the **RootInfo** object to traverse lower-level files or filter the entire directory tree. 
In addition, **listfile()** and **scanfile()** can be called by the **FileInfo** object to traverse lower-level files or filter specified directories. + + ```ts + // Start from the root directory. + let rootInfo = rootinfos[0]; + let fileInfos = []; + let isDone = false; + let filter = {suffix: [".txt", ".jpg", ".xlsx"]}; // Set filter criteria. + try { + let fileIterator = rootInfo.listFile(); // Traverse the root directory of rootinfos[0] and return an iterator object. + // let fileIterator = rootInfo.scanFile(filter); // Filter the file information of device rootinfos[0] that meets the specified conditions and return an iteration object. + if (!fileIterator) { + console.error("listFile interface returns an undefined object"); + return; + } + while (!isDone) { + let result = fileIterator.next(); + console.info("next result = " + JSON.stringify(result)); + isDone = result.done; + if (!isDone) + fileInfos.push(result.value); + } + } catch (error) { + console.error("listFile failed, errCode:" + error.code + ", errMessage:" + error.message); + } + + // Start from the specified directory. + let fileInfoDir = fileInfos[0]; // fileInfoDir indicates information about a directory. + let subFileInfos = []; + let isDone = false; + let filter = {suffix: [".txt", ".jpg", ".xlsx"]}; // Set filter criteria. + try { + let fileIterator = fileInfoDir.listFile(); // Traverse files in the specified directory and return an iterator object. + // let fileIterator = rootInfo.scanFile(filter); // Filter the files in the specified directory and return an iterator object. + if (!fileIterator) { + console.error("listFile interface returns an undefined object"); + return; + } + while (!isDone) { + let result = fileIterator.next(); + console.info("next result = " + JSON.stringify(result)); + isDone = result.done; + if (!isDone) + subfileInfos.push(result.value); + } + } catch (error) { + console.error("listFile failed, errCode:" + error.code + ", errMessage:" + error.message); + } + ``` + +5. 
Perform operations on files or directories. + You can integrate APIs of the user file access framework to implement user behaviors, such as deleting, renaming, creating, and moving a file (directory). The following example shows how to create a file. For details about other APIs, see [User File Access and Management](../reference/apis/js-apis-fileAccess.md). + + ```ts + // The local device is used as an example. + // Create a file. + // sourceUri is the URI in fileinfo of the Download directory. + // You need to use the obtained URI for development. + let sourceUri = "file://media/file/6"; + let displayName = "file1"; + let fileUri = null; + try { + // Obtain fileAccessHelper by referring to the sample code of fileAccess.createFileAccessHelper. + fileUri = await fileAccessHelper.createFile(sourceUri, displayName); + if (!fileUri) { + console.error("createFile return undefined object"); + return; + } + console.info("createFile sucess, fileUri: " + JSON.stringify(fileUri)); + } catch (error) { + console.error("createFile failed, errCode:" + error.code + ", errMessage:" + error.message); + }; + ``` diff --git a/en/application-dev/file-management/distributed-fs-overview.md b/en/application-dev/file-management/distributed-fs-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..aaa8235b947033ab95905aa1e393bc6fd78d622f --- /dev/null +++ b/en/application-dev/file-management/distributed-fs-overview.md @@ -0,0 +1,39 @@ +# Distributed File System Overview + +OpenHarmony distributed file system (hmdfs) provides cross-device file access capabilities applicable to the following scenarios: + +- The user can use the editing software on one device to edit the files on another device. + +- Music stored on a tablet can be directly viewed and played by a in-car system. + +- The user can use a tablet to view the photos taken by another device. 
+ +The hmdfs provides a globally consistent access view for each device dynamically connected to a network via DSoftBus and allows you to implement high-performance read and write operations on files with low latency by using basic file system APIs. + +## Distributed File System Architecture + +![Distributed File System Architecture](figures/distributed-file-system-architecture.png) + +- distributedfile_daemon: listens for device online status, establishes links over DSoftBus, and applies data transfer policies based on the security level of the device. + +- hmdfs: implements a network file system in the kernel, providing cache management, file access, metadata management, and conflict management. + - Buffer management + - After devices are connected to form a Virtual Device, hmdfs provides file access capabilities, but does not proactively transmit or copy files. Active copy is required when an application needs to save data to a local directory. + - The hmdfs ensures close-to-open cache consistency, which allows data to flushed when a client closes a file. Then, the latest data can be read when the file is opened on any other client. The hmdfs does not ensure real-time consistency of the file content. + - If data written at the peer end has not been flushed to the local end in a timely manner due to network problems, the file system flushes the data to the local end upon the next network access. However, if the data has been modified on the remote end, only the latest data can be flushed. + - File access + - OpenHarmony provides the same interface, [ohos.file.fs](../reference/apis/js-apis-file-fs.md), for accessing files in the local and distributed file systems. + - The files in the local file system are accessed in overlay mode. + - The files on another device are accessed over a synchronous network. + > **NOTE** + > + > symlink is not supported. 
+ - Metadata management + - In distributed networking, when a file is created, deleted, or modified on a client, the latest file can be viewed on another client. The speed varies depending on the network status. + - If a device goes offline, its data is no longer visible to other devices. However, due to the delay in sensing the device offline, the files of some offline devices may also be visible to other devices. Therefore, you need to consider the network delay in application development. + - Conflict Handling + - If a file on the local end and a file on the remote end have the same name, the file on the remote end will be renamed. + - If multiple remote devices have files of the same name, the name of the file with the smallest device access ID is retained and the files on other devices will be renamed. + - In the networking scenario, the directory tree has remote files. If "duplicate file name" is displayed when a file is created, + - the conflict file is renamed "_conflict_dev_ID". The ID automatically increases from 1. + - If a local directory has the same name as a remote directory, "_remote_directory" will be added to the end of the peer directory. 
diff --git a/en/application-dev/file-management/figures/application-file-directory-structure.png b/en/application-dev/file-management/figures/application-file-directory-structure.png new file mode 100644 index 0000000000000000000000000000000000000000..2ccc93278cb82842e21730194772a530324dcba7 Binary files /dev/null and b/en/application-dev/file-management/figures/application-file-directory-structure.png differ diff --git a/en/application-dev/file-management/figures/application-sandbox-file-access-relationship.png b/en/application-dev/file-management/figures/application-sandbox-file-access-relationship.png new file mode 100644 index 0000000000000000000000000000000000000000..184a2659c642346fa5ca04873049933688ff2377 Binary files /dev/null and b/en/application-dev/file-management/figures/application-sandbox-file-access-relationship.png differ diff --git a/en/application-dev/file-management/figures/application-sandbox-path.png b/en/application-dev/file-management/figures/application-sandbox-path.png new file mode 100644 index 0000000000000000000000000000000000000000..12a7bc72d95ee761b4755027a343c732e448a30b Binary files /dev/null and b/en/application-dev/file-management/figures/application-sandbox-path.png differ diff --git a/en/application-dev/file-management/figures/distributed-file-system-architecture.png b/en/application-dev/file-management/figures/distributed-file-system-architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..682f5608e50b78d2be82a6c709e1238834f90015 Binary files /dev/null and b/en/application-dev/file-management/figures/distributed-file-system-architecture.png differ diff --git a/en/application-dev/file-management/figures/external-storage-device-management.png b/en/application-dev/file-management/figures/external-storage-device-management.png new file mode 100644 index 0000000000000000000000000000000000000000..34810d9d3c191833ff8dae738dc4028f9df70d60 Binary files /dev/null and 
b/en/application-dev/file-management/figures/external-storage-device-management.png differ diff --git a/en/application-dev/file-management/figures/faf-data-flow.png b/en/application-dev/file-management/figures/faf-data-flow.png deleted file mode 100644 index d3c6fd90b59712bf2728208f247d531b919b4b3b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/file-management/figures/faf-data-flow.png and /dev/null differ diff --git a/en/application-dev/file-management/figures/file-classification-model.png b/en/application-dev/file-management/figures/file-classification-model.png new file mode 100644 index 0000000000000000000000000000000000000000..99ea098fd0fcd87f9903b21acd1945ba2e2bd277 Binary files /dev/null and b/en/application-dev/file-management/figures/file-classification-model.png differ diff --git a/en/application-dev/file-management/figures/public-file-operation.png b/en/application-dev/file-management/figures/public-file-operation.png deleted file mode 100644 index bb434a499da63e2245bd806bfc6e0d42686d883d..0000000000000000000000000000000000000000 Binary files a/en/application-dev/file-management/figures/public-file-operation.png and /dev/null differ diff --git a/en/application-dev/file-management/figures/user-file-access-framework.png b/en/application-dev/file-management/figures/user-file-access-framework.png new file mode 100644 index 0000000000000000000000000000000000000000..da3be3e609df7c18b4652fb6cd60763dbd33aad4 Binary files /dev/null and b/en/application-dev/file-management/figures/user-file-access-framework.png differ diff --git a/en/application-dev/file-management/file-access-across-devices.md b/en/application-dev/file-management/file-access-across-devices.md new file mode 100644 index 0000000000000000000000000000000000000000..840078f5f982d55a9d9a3713fafa49d632d352f8 --- /dev/null +++ b/en/application-dev/file-management/file-access-across-devices.md @@ -0,0 +1,60 @@ +# Accessing Files Across Devices + +The distributed file system 
provides cross-device file access capabilities for applications. For the same application installed on multiple devices, you can implement read and write of the files in the application's distributed directory (**/data/storage/el2/distributedfiles/**) across devices by using [ohos.file.fs](app-file-access.md). For example, device A and device B are installed with the same application. After device A and device B are connected to form a Virtual Device, the application on device A can access the files of the same application on device B. What you need to do is place the files in the distributed directory. + +## How to Develop + +1. Complete distributed networking for the devices. + Connect the devices to a LAN, and complete authentication of the devices. The devices must have the same account number. + +2. Implement cross-device access to the files of the same application. + Place the files in the **distributedfiles/** directory of the application sandbox to implement access from different devices. + + For example, create a test file in the **distributedfiles/** directory on device A and write data to the file. For details about how to obtain the context in the example, see [Obtaining the Context of UIAbility](../application-models/uiability-usage.md#obtaining-the-context-of-uiability). + + ```ts + import fs from '@ohos.file.fs'; + + let context =...; // Obtain the UIAbilityContext information of device A. + let pathDir = context.distributedFilesDir; + // Obtain the file path of the distributed directory. + let filePath = pathDir + '/test.txt'; + + try { + // Create a file in the distributed directory. + let file = fs.openSync(filePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); + console.info('Succeeded in creating.'); + // Write data to the file. + fs.writeSync(file.fd, 'content'); + // Close the file. + fs.closeSync(file.fd); + } catch (err) { + console.error(`Failed to openSync / writeSync / closeSync.
Code: ${err.code}, message: ${err.message}`); + } + ``` + + Read the file on device B. + + ```ts + import fs from '@ohos.file.fs'; + + let context =...; // Obtain the UIAbilityContext information of device B. + let pathDir = context.distributedFilesDir; + // Obtain the file path of the distributed directory. + let filePath = pathDir + '/test.txt'; + + try { + // Open the file in the distributed directory. + let file = fs.openSync(filePath, fs.OpenMode.READ_WRITE); + // Set the buffer for receiving the read data. + let buffer = new ArrayBuffer(4096); + // Read the file. The return value is the number of read bytes. + let num = fs.readSync(file.fd, buffer, { + offset: 0 + }); + // Print the read data. + console.info('read result: ' + String.fromCharCode.apply(null, new Uint8Array(buffer.slice(0, num)))); + } catch (err) { + console.error(`Failed to openSync / readSync. Code: ${err.code}, message: ${err.message}`); + } + ``` diff --git a/en/application-dev/file-management/file-access-framework-overview.md b/en/application-dev/file-management/file-access-framework-overview.md deleted file mode 100644 index 1cdf4f6c3513606c7361b951f24d3424bb065113..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/file-access-framework-overview.md +++ /dev/null @@ -1,44 +0,0 @@ -# File Access Framework Overview - -On devices running OpenHarmony 3.2 (API version 9) or later, applications can access public files on the local device, remote device, and external storage device, as well as files shared by multiple users, based on the File Access Framework (FAF). - -To ensure user data privacy, this framework allows users to create, open, delete, rename, and move files on the file access server only through the **File Manager** and **File Picker** applications. - -The user data of an application is stored on the device even after the application is uninstalled. 
- -If a system application needs to access public files on the local device, use [File Path Management](medialibrary-filepath-guidelines.md). - -> **NOTE** -> 1. For a non-management system application, for example, **Gallery**, use the **mediaLibrary** APIs for direct file operation. -> 2. In principle, do not mix use the FAF APIs with the mediaLibrary APIs. - -## FAF Mechanism -Based on the OpenHarmony [ExtensionAbility mechanism](../application-models/extensionability-overview.md), the FAF provides unified APIs for external systems. With these APIs, applications can preview and operate public files to implement their own logic. - -You can visit the [source repository](https://gitee.com/openharmony/filemanagement_user_file_service) for more details. - -The following figure shows the FAF-based file operation process. - -**Figure 1** Hierarchy of public file operations - -![](figures/public-file-operation.png) - -- **File access client**: an application that needs to access or operate public files. By starting the file selector application, it enables users to perform file operations on the UI. -- **File selector application**: a system application that allows users to access all shared datasets. You can use the FAF APIs to operate the datasets. -- **File access server**: a service that supports dataset sharing in the system. Currently, [UserFileManager](https://gitee.com/openharmony/multimedia_medialibrary_standard) and ExternalFileManager are available. UserFileManager manages datasets on local disks and distributed devices, and ExternalFileManager manages datasets on external storage devices such as SD cards and USB flash drives. You can also share your own datasets based on the FAF server configuration. - -The FAF has the following features: -- Users can browse the datasets provided by all file server applications in the system, rather than those provided by a single application. 
-- The file access client can operate files through the file selector application, without obtaining the permission to use the FAF. -- Multiple temporarily mounted devices, such as external storage cards and distributed devices, can be accessed at the same time. - -## Data Models -Data models in the FAF are transferred through URI, FileInfo, and RootInfo. For details, see [fileExtension](../reference/apis/js-apis-fileExtensionInfo.md). Applications on the file access server can use the **FileAccessExtensionAbility** APIs to securely share their data. - -**Figure 2** Data flow of the public file access framework - -![](figures/faf-data-flow.png) - -NOTE -- In the FAF, the file access client does not directly interact with the file access server. The client only needs to have the permission to start the file selector application. -- The file selector application provides a standard document access UI for users, even if the underlying file access servers differ greatly. diff --git a/en/application-dev/file-management/file-management-overview.md b/en/application-dev/file-management/file-management-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..2fbd4b4a036de1f20f8aa64825ab1dcb60f23414 --- /dev/null +++ b/en/application-dev/file-management/file-management-overview.md @@ -0,0 +1,25 @@ +# File Management Overview + +The data in an operating system (OS) can be classified into the following types based on the data structure: + +- Structured data: data that can be defined in a unified data model. The structured data is generally stored in a database. In application development, the management of structured data is implemented by the [data management module](../database/data-mgmt-overview.md). + +- Unstructured data: data that does not conform to any predefined data structure or model and cannot be easily presented in two-dimensional database tables. 
Unstructured data includes files in a variety of formats, such as documents, images, videos, and audio clips. In application development, the management of unstructured data is implemented by the file management module, which will be elaborated in this document. + +In the file management module, the files can be classified into the following types based on the file owner: + +- [Application files](app-file-overview.md): files of an application, including the application's installation files, resource files, and cached files. + +- [User files](user-file-overview.md): files of a user who logs in to the device, including the user's images, video and audio clips, and documents. + +- System files: files irrelevant to applications and users, including public libraries, device files, and system resource files. System files do not need to be managed by developers and are not described in this document. + +The file systems can be classified into the following types based on the file storage location (data source location): + +- Local file system: provides capabilities for accessing the files stored on a local device or its external storage devices (such as USB flash drives and removable hard drives). The local file system is the most basic file system and is not described in this document. + +- [Distributed file system](distributed-fs-overview.md): provides capabilities for accessing files across devices, which are connected through a computer network. + +**Figure 1** Files in an OS + +![File classification model](figures/file-classification-model.png) diff --git a/en/application-dev/file-management/filepicker-guidelines.md b/en/application-dev/file-management/filepicker-guidelines.md deleted file mode 100644 index ec813e256d3f4c1b3fe302aaf1653866a837a36a..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/filepicker-guidelines.md +++ /dev/null @@ -1,65 +0,0 @@ -# FilePicker Guide - -FilePicker is a system application preset in OpenHarmony. 
You can use it to select and save files. For details about the implementation of FilePicker, see [applications_filepicker](https://gitee.com/openharmony/applications_filepicker). - -FilePicker provides the following modes: -- **choose**: Use this mode when an application needs to select and upload or send files (including media resources such as images, and audio and video clips) in the device. When this mode is selected, the FilePicker **choose** mode window will be triggered to display a dialog box for you to select a file. You can select the target file and tap **Upload**. The application will receive the URI of the target file returned by FilePicker. -- **save**: Use this mode when an application needs to download and save files (including media resources such as images and audio and video clips). When this mode is selected, the FilePicker **save** mode window will be triggered to display a dialog box for you to select the destination path of the file to save. You can select the destination path and tap **Save**. The application will receive the URI of the saved file returned by FilePicker. - -## Development Guidelines - -> **NOTE** -> FilePicker supports only the applications developed based on the stage model. -> For details about the stage model, see [Interpretation of the Application Model](../application-models/application-model-description.md). - -You can use [AbilityContext.startAbilityForResult(want, options)](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartabilityforresult-1) with different parameters to start FilePicker in different modes. - -You need to use [Want](../reference/apis/js-apis-application-want.md) to specify **bundleName** and **abilityName** to start FilePicker. For details, see the following sample code. - -You also need to set **Want.parameters** to specify the FilePicker mode to start and the name of the file to save. -- To select a file, set **'startMode': 'choose'**. 
-- To save a file, set **'startMode': 'save'** and **'saveFile'**. - -You can set **options** of the [StartOptions](../reference/apis/js-apis-app-ability-startOptions.md) type to specify the dialog box style. The recommended value is **windowMode: 102**, which indicates a floating window. - -> **CAUTION** -> - In the **save** mode, a strong verification is performed on the file path based on the name of the file to save. For details about the file path format, see [File Path Management](medialibrary-filepath-guidelines.md). -> - If a file with the same name exists, a dialog box will be displayed asking you whether to overwrite the existing file. - -ArkTS sample code: -```ts -// Start FilePicker to select a file. -globalThis.context.startAbilityForResult( - { - action: "ohos.want.action.OPEN_FILE", - parameters: { - 'startMode': 'choose', //choose or save - } - }, - { windowMode: 102 } -) - -// Start FilePicker to save a file. -globalThis.context.startAbilityForResult( - { - action: "ohos.want.action.CREATE_FILE", - parameters: { - 'startMode': 'save', //choose or save - 'saveFile': 'test.jpg', - } - }, - { windowMode: 102 } -) - -// Data returned by FilePicker to startAbilityForResult. -let abilityResult = { - resultCode: resultCode, - want: { - parameters: { - 'startMode': startMode, - 'result': result - } - } -} -globalThis.context.terminateSelfWithResult(abilityResult) -``` diff --git a/en/application-dev/file-management/manage-external-storage.md b/en/application-dev/file-management/manage-external-storage.md new file mode 100644 index 0000000000000000000000000000000000000000..9889b4f8f0f6caaf49f0801c6d92ffe2f8bfc11f --- /dev/null +++ b/en/application-dev/file-management/manage-external-storage.md @@ -0,0 +1,87 @@ +# Managing External Storage Devices (Available Only for System Applications) + +External storage devices are pluggable. 
OpenHarmony provides the functions of listening for the device insertion and removal events and mounting/unmounting an external storage device. + +External storage devices are managed by the StorageManager and StorageDaemon services. StorageDaemon implements the underlying listening and mount/unmount functions. StorageManager provides status change notifications, query, and management capabilities for system applications. + +**Figure 1** External storage device management + +![External storage device management](figures/external-storage-device-management.png) + +- When an external storage device is inserted, the StorageDaemon process obtains an insertion event over netlink and creates a disk device and volume. The created volume is in the **UNMOUNTED** state. + +- Then, the StorageDaemon process checks the volume. During the check process, the volume is in the **CHECKING** state. + - The StorageDaemon process mounts the volume if the check is successful. If the mount operation is successful, the volume state changes to **MOUNTED** and StorageManager is instructed to send the COMMON_EVENT_VOLUME_MOUNTED broadcast. + - If the check fails, the volume state changes to **UNMOUNTED**. + +- For a volume in the **MOUNTED** state: + - If the device is directly removed, the volume information will be deleted and COMMON_EVENT_VOLUME_BAD_REMOVAL is broadcast. + - If the user chooses **Eject device**, the volume state changes to **EJECTING** and the COMMON_EVENT_VOLUME_EJECT is broadcast. After StorageDaemon unmounts the volume, the volume state changes to **UNMOUNTED** and COMMON_EVENT_VOLUME_UNMOUNTED is broadcast. + +- For a volume in the **UNMOUNTED** state, removing the device will delete the volume information and broadcast COMMON_EVENT_VOLUME_REMOVED. + +## Available APIs + +For details about APIs, see [Volume Management](../reference/apis/js-apis-file-volumemanager.md). + +The following table describes the broadcast related parameters. 
+ +**Table 1** Broadcast parameters + +| Broadcast| Parameter| +| -------- | -------- | +| usual.event.data.VOLUME_REMOVED | **id**: ID of the volume.
**diskId**: ID of the disk to which the volume belongs.| +| usual.event.data.VOLUME_UNMOUNTED | **id**: ID of the volume.
**diskId**: ID of the disk to which the volume belongs.
**volumeState**: state of the volume.| +| usual.event.data.VOLUME_MOUNTED | **id**: ID of the volume.
**diskId**: ID of the disk to which the volume belongs.
**volumeState**: state of the volume.
**fsUuid**: universally unique identifier (UUID) of the volume.
**path**: path where the volume is mounted.| +| usual.event.data.VOLUME_BAD_REMOVAL | **id**: ID of the volume.
**diskId**: ID of the disk to which the volume belongs.| +| usual.event.data.VOLUME_EJECT | **id**: ID of the volume.
**diskId**: ID of the disk to which the volume belongs.
**volumeState**: state of the volume.| + +## How to Develop + +Subscribe to broadcast events to notify the insertion and removal of external storage devices. The volumes can be queried and managed based on the volume information obtained from the broadcast. + +1. Apply for permissions. + Apply for the **ohos.permission.STORAGE_MANAGER** permission for subscribing to volume broadcast events. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). + +2. Subscribe to broadcast events. + Subscribe to the following events: + + - "usual.event.data.VOLUME_REMOVED": The device is removed. + - "usual.event.data.VOLUME_UNMOUNTED": The volume is unmounted. + - "usual.event.data.VOLUME_MOUNTED": The volume is mounted. + - "usual.event.data.VOLUME_BAD_REMOVAL": The device is forcibly removed. + - "usual.event.data.VOLUME_EJECT": The device is being ejected. + + ```ts + import CommonEvent from '@ohos.commonEventManager'; + import volumeManager from '@ohos.file.volumeManager'; + + const subscribeInfo = { + events: [ + "usual.event.data.VOLUME_REMOVED", + "usual.event.data.VOLUME_UNMOUNTED", + "usual.event.data.VOLUME_MOUNTED", + "usual.event.data.VOLUME_BAD_REMOVAL", + "usual.event.data.VOLUME_EJECT" + ] + }; + let subscriber = await CommonEvent.createSubscriber(subscribeInfo); + ``` + +3. Obtain the volume information from the broadcast. + + ```ts + CommonEvent.subscribe(subscriber, function (err, data) { + if (data.event === 'usual.event.data.VOLUME_MOUNTED') { + // Manage the volume device based on the information obtained from the broadcast. 
+ let volId = data.parameters.id; + volumeManager.getVolumeById(volId, function(error, vol) { + if (error) { + console.error('volumeManager getVolumeById failed'); + } else { + console.info('volumeManager getVolumeById successfully, the volume state is ' + vol.state); + } + }) + } + }) + ``` diff --git a/en/application-dev/file-management/medialibrary-album-guidelines.md b/en/application-dev/file-management/medialibrary-album-guidelines.md deleted file mode 100644 index 0fa043bad49b51aff526198137550f5079bd4349..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/medialibrary-album-guidelines.md +++ /dev/null @@ -1,94 +0,0 @@ -# Album Management - -You can use the APIs provided by the **mediaLibrary** module to create and delete an album and obtain images in the album. - -> **NOTE** -> -> Before developing features, read [MediaLibrary Overview](medialibrary-overview.md) to learn how to obtain a **MediaLibrary** instance and request the permissions to call the APIs of **MediaLibrary**. - -To ensure the application running efficiency, most **MediaLibrary** API calls are asynchronous, and both callback and promise modes are provided for these APIs. The following code samples use the promise mode. For details about the APIs, see [MediaLibrary API Reference](../reference/apis/js-apis-medialibrary.md). - -## Obtaining Images and Videos in an Album - -You can obtain images and videos in an album in either of the following ways: - -- Call [MediaLibrary.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-1) with an album specified to obtain the media assets. For details, see [Querying Media Assets with the Specified Album Name](medialibrary-resource-guidelines.md#querying-media-assets-with-the-specified-album-name). - -- Call [Album.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-3) to obtain an **Album** instance, so as to obtain the media assets in it. 
For details, see [Obtaining Images and Videos in an Album](medialibrary-resource-guidelines.md#obtaining-images-and-videos-in-an-album). - -## Creating an Album - -You can use [MediaLibrary.createAsset](../reference/apis/js-apis-medialibrary.md#createasset8-1), with the relative path set, to create an album while adding a media asset to the album. The relative path is the album name. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.WRITE_MEDIA**. - -The following describes how to create an album named **myAlbum**. - -**How to Develop** - -1. Call **getPublicDirectory** to obtain the public directory that stores files of a certain type. - - For details about the operation, see [Obtaining a Public Directory](medialibrary-filepath-guidelines.md#obtaining-a-public-directory). - -2. Call **createAsset** to create an image, with the relative path set to **path + 'myAlbum/'**. - - This operation creates an album and adds an image to it. - -```ts -async function example() { - let mediaType = mediaLibrary.MediaType.IMAGE; - let DIR_IMAGE = mediaLibrary.DirectoryType.DIR_IMAGE; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const path = await media.getPublicDirectory(DIR_IMAGE); - // myAlbum is the path for storing the new file and the name of the new album. - media.createAsset(mediaType, 'test.jpg', path + 'myAlbum/', (err, fileAsset) => { - if (fileAsset === undefined) { - console.error('createAlbum failed, message = ' + err); - } else { - console.info('createAlbum successfully, message = ' + JSON.stringify(fileAsset)); - } - }); -} -``` - -## Renaming an Album - -Renaming modifies the **FileAsset.albumName** attribute of the album, that is, the album name. After the modification, call [Album.commitModify](../reference/apis/js-apis-medialibrary.md#commitmodify8-3) to commit the modification to the database. 
- -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.WRITE_MEDIA**. - -The following describes how to rename the album **newAlbum**. - -**How to Develop** - -1. Create a retrieval condition for obtaining the target album. -2. Call **getAlbums** to obtain the album list. -3. Rename the album **newAlbum**. -4. Call **Album.commitModify** to commit the modification of the attributes to the database. - -```ts -async function example() { - let AlbumNoArgsfetchOp = { - selections: '', - selectionArgs: [], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let albumList = await media.getAlbums(AlbumNoArgsfetchOp); - let album = albumList[0]; - album.albumName = 'newAlbum'; - // Void callback. - album.commitModify().then(() => { - console.info("albumRename successfully"); - }).catch((err) => { - console.error("albumRename failed with error: " + err); - }); -} -``` diff --git a/en/application-dev/file-management/medialibrary-filepath-guidelines.md b/en/application-dev/file-management/medialibrary-filepath-guidelines.md deleted file mode 100644 index 1e310ef9312499bb131affb620ac7758e5033778..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/medialibrary-filepath-guidelines.md +++ /dev/null @@ -1,249 +0,0 @@ -# File Path Management - -User data on OpenHarmony is managed by the **mediaLibrary** module in a unified manner. You can use the APIs provided by this module to access and operate the user data. - -> **NOTE** -> -> Before developing features, read [MediaLibrary Overview](medialibrary-overview.md) to learn how to obtain a **MediaLibrary** instance and request the permissions to call the APIs of **MediaLibrary**. - -To ensure the application running efficiency, most **MediaLibrary** API calls are asynchronous, and both callback and promise modes are provided for these APIs. The following code samples use the promise mode. 
For details about the APIs, see [MediaLibrary API Reference](../reference/apis/js-apis-medialibrary.md). - -## File Formats Supported by Public Directories - -Before using file paths for development, learn the file formats supported by each public directory. -> **CAUTION** -> -> The following table lists only the file types that can be identified by the system. In your application development, pay attention to the file formats supported by the corresponding interfaces.
For example, only .jpeg and .webp can be used for image encoding, and only .jpg, .png, .gif, .bmp, .webp, and .raw can be used for image decoding. - -| Directory | Directory Type | Media Type | Description | Supported File Format | -| ---------- | ------------- | ------------- | -------------- | ------------------------------------------------------------ | -| Camera/ | DIR_CAMERA | VIDEO and IMAGE | Directory for storing images and videos taken by the camera. Videos and images can be stored in this directory and its subdirectories.| .bmp / .bm / .gif / .jpg /. jpeg / .jpe / .png / .webp / .raw / .svg / .heif / .mp4 / .3gp / .mpg / .mov / .webm / .mkv | -| Videos/ | DIR_VIDEO | VIDEO | Dedicated video directory. Only videos can be stored in this directory and its subdirectories.| .mp4 / .3gp / .mpg / .mov / .webm / .mkv | -| Pictures/ | DIR_IMAGE | IMAGE | Dedicated image directory. Only images can be stored in this directory and its subdirectories.| .bmp / .bm / .gif / .jpg /. jpeg / .jpe / .png / .webp / .raw / .svg / .heif | -| Audios/ | DIR_AUDIO | AUDIO |Dedicated audio directory. Only audio files can be stored in this directory and its subdirectories.| .aac/.mp3/.flac/.wav/.ogg | -| Documents/ | DIR_DOCUMENTS | FILE |Dedicated file directory. Only files except audios, images, and videos can be stored in this directory and its subdirectories.| - | -| Download/ | DIR_DOWNLOAD | ALLTYPE |Directory for storing downloaded files. The types of files in this directory and its subdirectories are not restricted.| - | - -## Obtaining a Public Directory - -Different types of files are stored in different public directories. You can call [getPublicDirectory](../reference/apis/js-apis-medialibrary.md#getpublicdirectory8-1) to obtain the public directory that stores files of a certain type. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.READ_MEDIA**. 
- -The following describes how to obtain the public directory that stores camera files. - -```ts -async function example(){ - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let DIR_CAMERA = mediaLibrary.DirectoryType.DIR_CAMERA; - const dicResult = await media.getPublicDirectory(DIR_CAMERA); - if (dicResult == 'Camera/') { - console.info('mediaLibraryTest : getPublicDirectory passed'); - } else { - console.error('mediaLibraryTest : getPublicDirectory failed'); - } -} -``` - -## Copying Files Between the Application Sandbox and the Public Directory - -OpenHarmony provides the application sandbox to minimize the leakage of application data and user privacy information. - -Users can access files stored in the public directories through the system applications **Files** and **Gallery**. However, files in the application sandbox can be accessed only by the application itself. - -### Copying a File - -You can call [mediaLibrary.FileAsset.open](../reference/apis/js-apis-medialibrary.md#open8-1) to open a file in a public directory. - -You can call [fs.open](../reference/apis/js-apis-file-fs.md#fsopen) to open a file in the application sandbox. The sandbox directory can be accessed only through the application context. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permissions **ohos.permission.READ_MEDIA** and **ohos.permission.WRITE_MEDIA**. -- You have imported the module [@ohos.file.fs](../reference/apis/js-apis-file-fs.md) in addition to @ohos.multimedia.mediaLibrary. -- The **testFile.txt** file has been created and contains content. - -**How to Develop** - -1. Call [context.filesDir](../reference/apis/js-apis-file-fs.md) to obtain the directory of the application sandbox. -2. Call **MediaLibrary.getFileAssets** and **FetchFileResult.getFirstObject** to obtain the first file in the result set of the public directory. -3. Call **fs.open** to open the file in the sandbox. -4. 
Call **fileAsset.open** to open the file in the public directory. -5. Call [fs.copyfile](../reference/apis/js-apis-file-fs.md#fscopyfile) to copy the file. -6. Call **fileAsset.close** and [fs.close](../reference/apis/js-apis-file-fs.md#fsclose) to close the file. - -**Example 1: Copying Files from the Public Directory to the Sandbox** - -```ts -async function copyPublic2Sandbox() { - try { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let sandboxDirPath = context.filesDir; - let fileKeyObj = mediaLibrary.FileKey; - let fileAssetFetchOp = { - selections: fileKeyObj.DISPLAY_NAME + '= ?', - selectionArgs: ['testFile.txt'], - }; - let fetchResult = await media.getFileAssets(fileAssetFetchOp); - let fileAsset = await fetchResult.getFirstObject(); - - let fdPub = await fileAsset.open('rw'); - let fdSand = await fs.open(sandboxDirPath + '/testFile.txt', fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); - await fs.copyFile(fdPub, fdSand.fd); - - await fileAsset.close(fdPub); - await fs.close(fdSand.fd); - - let content_sand = await fs.readText(sandboxDirPath + '/testFile.txt'); - console.info('content read from sandbox file: ', content_sand) - } catch (err) { - console.info('[demo] copyPublic2Sandbox fail, err: ', err); - } -} -``` - -**Example 2: Copying a File from the Sandbox to the Public Directory** - -```ts -async function copySandbox2Public() { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let sandboxDirPath = context.filesDir; - - let DIR_DOCUMENTS = mediaLibrary.DirectoryType.DIR_DOCUMENTS; - const publicDirPath = await media.getPublicDirectory(DIR_DOCUMENTS); - try { - let fileAsset = await media.createAsset(mediaLibrary.MediaType.FILE, 'testFile02.txt', publicDirPath); - console.info('createFile successfully, message = ' + fileAsset); - } catch (err) { - console.error('createFile failed, message = ' + err); - } - try { - let fileKeyObj = mediaLibrary.FileKey; - let 
fileAssetFetchOp = { - selections: fileKeyObj.DISPLAY_NAME + '= ?', - selectionArgs: ['testFile02.txt'], - }; - let fetchResult = await media.getFileAssets(fileAssetFetchOp); - var fileAsset = await fetchResult.getFirstObject(); - } catch (err) { - console.error('file asset get failed, message = ' + err); - } - let fdPub = await fileAsset.open('rw'); - let fdSand = await fs.open(sandboxDirPath + 'testFile.txt', fs.OpenMode.READ_WRITE); - await fs.copyFile(fdSand.fd, fdPub); - await fileAsset.close(fdPub); - await fs.close(fdSand.fd); - let fdPubRead = await fileAsset.open('rw'); - try { - let arrayBuffer = new ArrayBuffer(4096); - await fs.read(fdPubRead, arrayBuffer); - var content_pub = String.fromCharCode(...new Uint8Array(arrayBuffer)); - fileAsset.close(fdPubRead); - } catch (err) { - console.error('read text failed, message = ', err); - } - console.info('content read from public file: ', content_pub); -} -``` - -### Reading and Writing a File - -You can use **FileAsset.open** and **FileAsset.close** of [mediaLibrary](../reference/apis/js-apis-medialibrary.md) to open and close a file, and use **fs.read** and **fs.write** in [file.fs](../reference/apis/js-apis-file-fs.md) to read and write the file. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permissions **ohos.permission.READ_MEDIA** and **ohos.permission.WRITE_MEDIA**. -- You have imported the module [@ohos.file.fs](../reference/apis/js-apis-file-fs.md) in addition to @ohos.multimedia.mediaLibrary. - -**How to Develop** - -1. Create a file. 
- -```ts -async function example() { - let mediaType = mediaLibrary.MediaType.FILE; - let DIR_DOCUMENTS = mediaLibrary.DirectoryType.DIR_DOCUMENTS; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const path = await media.getPublicDirectory(DIR_DOCUMENTS); - media.createAsset(mediaType, "testFile.txt", path).then((asset) => { - console.info("createAsset successfully:" + JSON.stringify(asset)); - }).catch((err) => { - console.error("createAsset failed with error: " + err); - }); -} -``` - -2. Call **FileAsset.open** to open the file. - -3. Call [fs.write](../reference/apis/js-apis-file-fs.md#fswrite) to write a string to the file. - -4. Call [fs.read](../reference/apis/js-apis-file-fs.md#fsread) to read the file and save the data read in an array buffer. - -5. Convert the array buffer to a string. - -6. Use **FileAsset.close** to close the file. - -**Example 1: Opening an Existing File and Writing Data to It** - -```ts -async function writeOnlyPromise() { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let fileKeyObj = mediaLibrary.FileKey; - let fileAssetFetchOp = { - selections: fileKeyObj.DISPLAY_NAME + '= ?', - selectionArgs: ['testFile.txt'], - }; - let fetchResult = await media.getFileAssets(fileAssetFetchOp); - let fileAsset = await fetchResult.getFirstObject(); - console.info('fileAssetName: ', fileAsset.displayName); - - try { - let fd = await fileAsset.open('w'); - console.info('file descriptor: ', fd); - await fs.write(fd, "Write file test content."); - await fileAsset.close(fd); - } catch (err) { - console.error('write file failed, message = ', err); - } -} -``` - -**Example 2: Opening an Existing File and Reading Data from It** - -```ts -async function readOnlyPromise() { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let fileKeyObj = mediaLibrary.FileKey; - let fileAssetFetchOp = { - selections: fileKeyObj.DISPLAY_NAME + '= 
?' , - selectionArgs: ['testFile.txt'], - }; - let fetchResult = await media.getFileAssets(fileAssetFetchOp); - let fileAsset = await fetchResult.getFirstObject(); - console.info('fileAssetName: ', fileAsset.displayName); - - try { - let fd = await fileAsset.open('r'); - let arrayBuffer = new ArrayBuffer(4096); - await fs.read(fd, arrayBuffer); - let fileContent = String.fromCharCode(...new Uint8Array(arrayBuffer)); - globalThis.fileContent = fileContent; - globalThis.fileName = fileAsset.displayName; - console.info('file content: ', fileContent); - await fileAsset.close(fd); - } catch (err) { - console.error('read file failed, message = ', err); - } -} -``` diff --git a/en/application-dev/file-management/medialibrary-overview.md b/en/application-dev/file-management/medialibrary-overview.md deleted file mode 100644 index f7e0ab9ac4772a7770b1e2bc9f6b63845cb654b5..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/medialibrary-overview.md +++ /dev/null @@ -1,127 +0,0 @@ -# MediaLibrary Development Overview - -The **mediaLibrary** module provides APIs for you to access and modify media files. - -- You can manage [media assets (audios, videos, image, and files)](medialibrary-resource-guidelines.md) as follows: - - Query media assets. - - Obtain an image or a video. - - Obtain the thumbnail of an image or a video. - - Create a media asset. - - Rename a media asset. - - Move a media asset to the recycle bin. -- You can manage [file paths](medialibrary-filepath-guidelines.md) as follows: - - Obtain the public directory that stores files of a certain type. - - Copy files between the application sandbox and the public directory. - - Read and write a file. -- You can manage [albums](medialibrary-album-guidelines.md) as follows: - - Obtain images and videos in an album. - - Create an album. - - Rename an album. - -> **NOTE** -> -> This development guide applies only to the stage model (available from API version 9). 
- -To access and modify personal media data, an application must obtain a **MediaLibrary** instance and request the media asset read and write permissions from the user. Unless otherwise specified, the **MediaLibrary** APIs are used in **pages/index.ets** or custom .ets files of the project code. - -Before using the **MediaLibrary** APIs to develop features, you must learn how to: - -- [Obtain a MediaLibrary Instance](#obtaining-a-medialibrary-instance) -- [Request Permissions](#requesting-permissions) - -## Obtaining a MediaLibrary Instance - -An application must call [getMediaLibrary](../reference/apis/js-apis-medialibrary.md#medialibrarygetmedialibrary8) to obtain a **MediaLibrary** instance based on the application context. Through this instance, the application can access and modify personal media data (such as audios, videos, images, and files). - -**How to Develop** - -1. Import the **mediaLibrary** module. -2. Call **getContext** to obtain the application context. -3. Obtain a **MediaLibrary** instance. - -```ts -import mediaLibrary from '@ohos.multimedia.mediaLibrary'; - -const context = getContext(this); -let media = mediaLibrary.getMediaLibrary(context); -``` - -## Requesting Permissions - -To read and write a **MediaLibrary** instance, you must have the required permissions, as described in the table below. Before requesting the permissions, ensure that the [basic principles for permission management](../security/accesstoken-overview.md#basic-principles-for-permission-management) are met. 
- -| Permission | Description | Authorization Mode | -| ------------------------------ | ------------------------------------------ | ---------- | -| ohos.permission.READ_MEDIA | Allows an application to read media files from the user's external storage.| user_grant | -| ohos.permission.WRITE_MEDIA | Allows an application to read media files from and write media files into the user's external storage.| user_grant | -| ohos.permission.MEDIA_LOCATION | Allows an application to access geographical locations in the user's media file.| user_grant | - -After configuring the permissions in the **module.json5** file, the application must call [abilityAccessCtrl.requestPermissionsFromUser](../reference/apis/js-apis-abilityAccessCtrl.md#requestpermissionsfromuser9) to check for the required permissions and if they are not granted, request the permissions from the user by displaying a dialog box. - -> **NOTE**
Even if the user has granted a permission, the application must check for the permission before calling an API protected by the permission. It should not persist the permission granted status, because the user can revoke the permission through the system application **Settings**. - -**How to Develop** - -1. Declare the permissions in the **module.json5** file. Add the **requestPermissions** tag under **module** in the file, and set the tag based on the project requirements. For details about the tag, see [Guide for Requesting Permissions from User](../security/accesstoken-guidelines.md). - -```json -{ - "module": { - "requestPermissions": [ - { - "name": "ohos.permission.MEDIA_LOCATION", - "reason": "$string:reason", - "usedScene": { - "abilities": [ - "EntryAbility" - ], - "when": "always" - } - }, - { - "name": "ohos.permission.READ_MEDIA", - "reason": "$string:reason", - "usedScene": { - "abilities": [ - "EntryAbility" - ], - "when": "always" - } - }, - { - "name": "ohos.permission.WRITE_MEDIA", - "reason": "$string:reason", - "usedScene": { - "abilities": [ - "EntryAbility" - ], - "when": "always" - } - } - ] - } -} -``` - -2. In the **Ability.ts** file, call **requestPermissionsFromUser** in the **onWindowStageCreate** callback to check for the required permissions and if they are not granted, request the permissions from the user by displaying a dialog box. 
- -```ts -import UIAbility from '@ohos.app.ability.UIAbility'; -import abilityAccessCtrl, {Permissions} from '@ohos.abilityAccessCtrl'; - -export default class EntryAbility extends UIAbility { - onWindowStageCreate(windowStage) { - let list : Array = ['ohos.permission.READ_MEDIA', 'ohos.permission.WRITE_MEDIA']; - let permissionRequestResult; - let atManager = abilityAccessCtrl.createAtManager(); - atManager.requestPermissionsFromUser(this.context, list, (err, result) => { - if (err) { - console.error('requestPermissionsFromUserError: ' + JSON.stringify(err)); - } else { - permissionRequestResult = result; - console.info('permissionRequestResult: ' + JSON.stringify(permissionRequestResult)); - } - }); - } -} -``` diff --git a/en/application-dev/file-management/medialibrary-resource-guidelines.md b/en/application-dev/file-management/medialibrary-resource-guidelines.md deleted file mode 100644 index 054591847ffa156f5ee85cf5e2412b215750e283..0000000000000000000000000000000000000000 --- a/en/application-dev/file-management/medialibrary-resource-guidelines.md +++ /dev/null @@ -1,384 +0,0 @@ -# Media Asset Management - -Your applications can use the APIs provided by the **mediaLibrary** module to perform operations on media assets such as audios, videos, images, and files. - -> **NOTE** -> -> Before developing features, read [MediaLibrary Overview](medialibrary-overview.md) to learn how to obtain a **MediaLibrary** instance and request the permissions to call the APIs of **MediaLibrary**. - -To maximize the application running efficiency, most **MediaLibrary** API calls are asynchronous in callback or promise mode. The following code samples use the promise mode. For details about the APIs, see [MediaLibrary API Reference](../reference/apis/js-apis-medialibrary.md). - -## Querying Media Assets - -You can query media assets by condition such as the media type, date, or album name. 
- -To do so, call [MediaLibrary.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-1), with a **MediaFetchOptions** object passed in to specify the conditions. In this object, **MediaFetchOptions.selections** are the retrieval conditions, and the enumerated values of **FileKey** are used as the column names of the conditions; **MediaFetchOptions.selectionArgs** are the values of the conditions. You can also specify **order** (sorting mode of the search result), **uri** (file URI), and **networkId** (network ID of the registered device) as the conditions. - -To obtain the object at the specified position (for example, the first, the last, or with the specified index) in the result set, call [FetchFileResult](../reference/apis/js-apis-medialibrary.md#fetchfileresult7). In this section, **getNextObject** is used cyclically to obtain all media assets in the result set. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.READ_MEDIA**. - -### Querying Media Assets with the Specified Media Type - -The following describes how to obtain images. - -**How to Develop** - -To specify the media type as the retrieval condition, set **selections** to **FileKey.MEDIA_TYPE**. - -To specify the image as the media type, set **selectionArgs** to **MediaType.IMAGE**. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let fileType = mediaLibrary.MediaType.IMAGE; - let option = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [fileType.toString()], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - fetchFileResult.getFirstObject().then(async (fileAsset) => { - console.log('getFirstObject.displayName : ' + fileAsset.displayName); - for (let i = 1; i < fetchFileResult.getCount(); i++) { - let fileAsset = await fetchFileResult.getNextObject(); - console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName); - } - }).catch((err) => { - console.error('Failed to get first object: ' + err); - }); -} -``` - -### Querying Media Assets with the Specified Date - -The following describes how to obtain all the media assets that are added from the specified date. You can also use the modification date and shooting date as the retrieval conditions. - -To specify the date when the files are added as the retrieval condition, set **selections** to **FileKey.DATE_ADDED**. - -To specify the date 2022-8-5, set **selectionArgs** to **2022-8-5**. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let option = { - selections: fileKeyObj.DATE_ADDED + '> ?', - selectionArgs: ['2022-8-5'], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - fetchFileResult.getFirstObject().then(async (fileAsset) => { - console.info('getFirstObject.displayName : ' + fileAsset.displayName); - for (let i = 1; i < fetchFileResult.getCount(); i++) { - let fileAsset = await fetchFileResult.getNextObject(); - console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName); - } - }).catch((err) => { - console.error('Failed to get first object: ' + err); - }); -} -``` - -### Querying Media Assets and Sorting Them - -The following describes how to query images and sort them in descending order by the date when they are added. You can also sort them in ascending order. - -To sort files in descending order by the date when they are added, set **order** to **FileKey.DATE_ADDED + " DESC"**. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let fileType = mediaLibrary.MediaType.IMAGE; - let option = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [fileType.toString()], - order: fileKeyObj.DATE_ADDED + " DESC", - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - fetchFileResult.getFirstObject().then(async (fileAsset) => { - console.info('getFirstObject.displayName : ' + fileAsset.displayName); - for (let i = 1; i < fetchFileResult.getCount(); i++) { - let fileAsset = await fetchFileResult.getNextObject(); - console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName); - } - }).catch((err) => { - console.error('Failed to get first object: ' + err); - }); -} -``` - -### Querying Media Assets with the Specified Album Name - -The following describes how to query media assets in **myAlbum**. - -To specify the album name as the retrieval condition, set **selections** to **FileKey.ALBUM_NAME**. - -To specify the album name **'myAlbum'**, set **selectionArgs** to **'myAlbum'**. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let option = { - selections: fileKeyObj.ALBUM_NAME + '= ?', - selectionArgs: ['myAlbum'], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - if (albumList.length > 0) { - fetchFileResult.getFirstObject().then((album) => { - console.info('getFirstObject.displayName : ' + album.albumName); - }).catch((err) => { - console.error('Failed to get first object: ' + err); - }); - } else { - console.info('getAlbum list is: 0'); - } -} -``` - -## Obtaining Images and Videos in an Album - -You can obtain media assets in an album in either of the following ways: -- Call [MediaLibrary.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-1) with an album specified, as described in [Querying Media Assets with the Specfied Album Name](#querying-media-assets-with-the-specified-album-name). -- Call [Album.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-3) to obtain an **Album** instance, so as to obtain the media assets in it. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.READ_MEDIA**. - -**How to Develop** - -The following describes how to obtain videos in an album named **New Album 1**. - -1. Create a retrieval condition for obtaining the target **Album** instance. - -```ts -let fileKeyObj = mediaLibrary.FileKey; -let AlbumNoArgsFetchOp = { - selections: fileKeyObj.ALBUM_NAME + '= ?', - selectionArgs:['New Album 1'] -} -``` - -2. Create a retrieval condition for obtaining videos in the target album. - -```ts -let fileKeyObj = mediaLibrary.FileKey; -let videoType = mediaLibrary.MediaType.VIDEO; -let videoFetchOp = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [videoType.toString()], -} -``` - -3. 
Call **Album.getFileAssets** to obtain the videos in the target album. - -Complete sample code: - -```ts -async function getCameraImagePromise() { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let fileKeyObj = mediaLibrary.FileKey; - let videoType = mediaLibrary.MediaType.VIDEO; - let videoFetchOp = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [videoType.toString()], - } - let AlbumNoArgsFetchOp = { - selections: fileKeyObj.ALBUM_NAME + '= ?', - selectionArgs:['New Album 1'] - } - - let albumList = await media.getAlbums(AlbumNoArgsFetchOp); - if (albumList.length > 0) { - const album = albumList[0]; - let fetchFileResult = await album.getFileAssets(videoFetchOp); - let count = fetchFileResult.getCount(); - console.info("get mediaLibrary VIDEO number", count); - } else { - console.info('getAlbum list is: 0'); - } -} -``` - -## Obtaining the Thumbnail of an Image or a Video - -You can call [FileAsset.getThumbnail](../reference/apis/js-apis-medialibrary.md#getthumbnail8-2) with the thumbnail size passed in to obtain the thumbnail of an image or a video. Your application can use thumbnails to offer a quick preview on images and videos. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.READ_MEDIA**. - -### Obtaining the Thumbnail of an Image - -The following describes how to obtain the thumbnail (size: 720 x 720) of the first image in the album. - -**How to Develop** - -1. Create a retrieval condition for obtaining images in the target album. -2. Call **getFileAssets** to obtain the images in the target album. -3. Call **getFirstObject** to obtain the first image among all the images obtained. -4. Call **getThumbnail** to obtain the thumbnail of the first image. 
- -```ts -async function getFirstThumbnailPromise() { - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - let fileKeyObj = mediaLibrary.FileKey; - let imageType = mediaLibrary.MediaType.IMAGE; - let imagesFetchOp = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [imageType.toString()], - } - - let size = { width: 720, height: 720 }; - const fetchFileResult = await media.getFileAssets(imagesFetchOp); - if (fetchFileResult === undefined) { - console.error("get image failed with error"); - return; - } else { - const asset = await fetchFileResult.getFirstObject(); - asset.getThumbnail(size).then((pixelMap) => { - pixelMap.getImageInfo().then((info) => { - console.info('get Thumbnail info: ' + "width: " + info.size.width + " height: " + info.size.height); - }).catch((err) => { - console.error("getImageInfo failed with error: " + err); - }); - }).catch((err) => { - console.error("getImageInfo failed with error: " + err); - }); - } -} -``` - -## Creating a Media Asset - -You can call [MediaLibrary.createAsset](../reference/apis/js-apis-medialibrary.md#createasset8-1) to create a media asset. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.WRITE_MEDIA**. -- [You have obtained a public directory](medialibrary-filepath-guidelines.md). - -The following describes how to create a file of the **MediaType.FILE** type. 
- -```ts -async function example() { - let mediaType = mediaLibrary.MediaType.FILE; - let DIR_DOCUMENTS = mediaLibrary.DirectoryType.DIR_DOCUMENTS; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const path = await media.getPublicDirectory(DIR_DOCUMENTS); - media.createAsset(mediaType, "testFile.text", path).then((asset) => { - console.info("createAsset successfully:"+ JSON.stringify(asset)); - }).catch((err) => { - console.error("createAsset failed with error: " + err); - }); -} -``` - -## Moving a Media Asset to the Recycle Bin - -You can use [FileAsset.trash](../reference/apis/js-apis-medialibrary.md#trash8) to move a media asset to the recycle bin. - -By default, files in the recycle bin will be stored for 30 days before being permanently removed. During this period, you can set **isTrash** in **trash** to **false** to recover the files. Application users can also recover the files through the system applications **Files** or **Gallery**. - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.WRITE_MEDIA**. - -The following describes how to move the first file in the result set to the recycle bin. - -**How to Develop** - -1. Create a retrieval condition for obtaining images in the target album. -2. Call **getFileAssets** to obtain the images in the target album. -3. Call **getFirstObject** to obtain the first image among all the images obtained. -4. Call **trash** to move the first image to the recycle bin. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let fileType = mediaLibrary.MediaType.FILE; - let option = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [fileType.toString()], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - let asset = await fetchFileResult.getFirstObject(); - if (asset === undefined) { - console.error('asset not exist'); - return; - } - // Void callback. - asset.trash(true).then(() => { - console.info("trash successfully"); - }).catch((err) => { - console.error("trash failed with error: " + err); - }); -} -``` - -## Renaming a Media Asset - -To rename a media asset, modify the **FileAsset.displayName** attribute (which specifies the displayed file name, including the file name extension) and commit the modification through [FileAsset.commitModify](../reference/apis/js-apis-medialibrary.md#commitmodify8-1). - -Before renaming a file, you must obtain the file, for example, by calling [FetchFileResult](../reference/apis/js-apis-medialibrary.md#fetchfileresult7). - -**Prerequisites** - -- You have obtained a **MediaLibrary** instance. -- You have granted the permission **ohos.permission.WRITE_MEDIA**. - -The following describes how to rename the first file in the result set as **newImage.jpg**. - -**How to Develop** - -1. Create a retrieval condition for obtaining images in the target album. -2. Call **getFileAssets** to obtain the images in the target album. -3. Call **getFirstObject** to obtain the first image among all the images obtained. -4. Rename the image as **newImage.jpg**. -5. Call **FileAsset.commitModify** to commit the modification to the database. 
- -```ts -async function example() { - let fileKeyObj = mediaLibrary.FileKey; - let fileType = mediaLibrary.MediaType.IMAGE; - let option = { - selections: fileKeyObj.MEDIA_TYPE + '= ?', - selectionArgs: [fileType.toString()], - }; - const context = getContext(this); - let media = mediaLibrary.getMediaLibrary(context); - const fetchFileResult = await media.getFileAssets(option); - let asset = await fetchFileResult.getFirstObject(); - if (asset === undefined) { - console.error('asset not exist'); - return; - } - asset.displayName = 'newImage.jpg'; - // Void callback. - asset.commitModify((err) => { - if (err) { - console.error('fileRename Failed '); - return; - } - console.info('fileRename successful.'); - }); -} -``` diff --git a/en/application-dev/file-management/save-user-file.md b/en/application-dev/file-management/save-user-file.md new file mode 100644 index 0000000000000000000000000000000000000000..d1ca80444deffa2bad38f01442e0135e20ac67c3 --- /dev/null +++ b/en/application-dev/file-management/save-user-file.md @@ -0,0 +1,103 @@ +# Saving User Files + +When a user needs to download a file from the network to a local directory or save a user file into another directory, use **FilePicker** to save the file. + +The operations for saving images, audio or video clips, and documents are similar. Call **save()** of the corresponding picker instance and pass in **saveOptions**. + + +## Saving Images or Video Files + +1. Import the **FilePicker** module. + + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create a **photoSaveOptions** instance. + + ```ts + const photoSaveOptions = new picker.PhotoSaveOptions(); // Create a photoSaveOptions instance. + photoSaveOptions.newFileNames = ["PhotoViewPicker01.jpg"]; // (Optional) Set the names of the files to save. + ``` + +3. Create a **photoViewPicker** instance and call [save()](../reference/apis/js-apis-file-picker.md#save) to open the **FilePicker** page to save the files. 
+ After the user selects the target folder, the file saving operation is complete. After the files are saved successfully, the URIs of the files saved are returned. + + ```ts + const photoViewPicker = new picker.PhotoViewPicker(); + photoViewPicker.save(photoSaveOptions) + .then(async (photoSaveResult) => { + let uri = photoSaveResult[0]; + // Perform operations on the files based on the file URIs obtained. + }) + .catch((err) => { + console.error(`Invoke photoViewPicker.save failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +## Saving Documents + +1. Import the **FilePicker** module. + + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create a **documentSaveOptions** instance. + + ```ts + const documentSaveOptions = new picker.DocumentSaveOptions(); // Create a documentSaveOptions instance. + documentSaveOptions.newFileNames = ["DocumentViewPicker01.txt"]; // (Optional) Set the names of the documents to save. + ``` + +3. Create a **documentViewPicker** instance, and call [save()](../reference/apis/js-apis-file-picker.md#save-3) to open the **FilePicker** page to save the documents. + After the user selects the target folder, the file saving operation is complete. After the files are saved successfully, the URIs of the files saved are returned. + + > **NOTE** + > + > Currently, **DocumentSaveOptions** is not configurable. By default, documents of all types can be saved. + + ```ts + const documentViewPicker = new picker.DocumentViewPicker(); // Create a documentViewPicker instance. + documentViewPicker.save(documentSaveOptions) + .then(async (documentSaveResult) => { + let uri = documentSaveResult[0]; + // For example, write data to the documents based on the obtained URIs. + }) + .catch((err) => { + console.error(`Invoke documentPicker.save failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +## Saving Audio Files + +1. Import the **FilePicker** module. 
+ + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create an **audioSaveOptions** instance. + + ```ts + const audioSaveOptions = new picker.AudioSaveOptions(); // Create an audioSaveOptions instance. + audioSaveOptions.newFileNames = ['AudioViewPicker01.mp3']; // (Optional) Set the names of the files to save. + ``` + +3. Create an **audioViewPicker** instance, and call [save()](../reference/apis/js-apis-file-picker.md#save-6) to open the **FilePicker** page to save the files. + After the user selects the target folder, the file saving operation is complete. After the files are saved successfully, the URIs of the files saved are returned. + > **NOTE** + > + > Currently, **AudioSaveOptions** is not configurable. By default, audio files of all types can be saved. + + ```ts + const audioViewPicker = new picker.AudioViewPicker(); + audioViewPicker.save(audioSaveOptions) + .then((audioSaveResult) => { + let uri = audioSaveResult[0]; + // Perform operations on the audio files based on the file URIs. + }) + .catch((err) => { + console.error(`Invoke audioViewPicker.save failed, code is ${err.code}, message is ${err.message}`); + }) + ``` diff --git a/en/application-dev/file-management/select-user-file.md b/en/application-dev/file-management/select-user-file.md new file mode 100644 index 0000000000000000000000000000000000000000..77fc2dd23c080c357d1749df4bb3ca551cba3a0d --- /dev/null +++ b/en/application-dev/file-management/select-user-file.md @@ -0,0 +1,117 @@ +# Selecting User Files + +If your application needs to support sharing and saving of user files (such as images and videos) by users, you can use the [FilePicker](../reference/apis/js-apis-file-picker.md) prebuilt in OpenHarmony to implement selecting and saving of user files. + +The **FilePicker** provides the following interfaces by file type: + +- [**PhotoViewPicker**](../reference/apis/js-apis-file-picker.md#photoviewpicker): used to select and save images or video files. 
+ +- [**DocumentViewPicker**](../reference/apis/js-apis-file-picker.md#documentviewpicker): used to select and save documents. + +- [**AudioViewPicker**](../reference/apis/js-apis-file-picker.md#audioviewpicker): used to select and save audio files. + +## Selecting Images or Video Files + +1. Import the **FilePicker** module. + + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create a **photoSelectOptions** instance. + + ```ts + const photoSelectOptions = new picker.PhotoSelectOptions(); + ``` + +3. Set the file type and the maximum number of media files to select. + For example, select a maximum of five images. For details about the media file type, see [PhotoViewMIMETypes](../reference/apis/js-apis-file-picker.md#photoviewmimetypes). + + ```ts + photoSelectOptions.MIMEType = picker.PhotoViewMIMETypes.IMAGE_TYPE; // Select images. + photoSelectOptions.maxSelectNumber = 5; // Set the maximum number of images to select. + ``` + +4. Create a **photoPicker** instance and call [select()](../reference/apis/js-apis-file-picker.md#select) to open the **FilePicker** page for the user to select files. + + Use [PhotoSelectResult](../reference/apis/js-apis-file-picker.md#photoselectresult) to return a result set. Further operations on the selected files can be performed based on the file URIs in the result set. + + ```ts + const photoPicker = new picker.PhotoViewPicker(); + photoPicker.select(photoSelectOptions) + .then(async (photoSelectResult) => { + let uri = photoSelectResult.photoUris[0]; + // Perform operations on the files based on the file URIs obtained. + }) + .catch((err) => { + console.error(`Invoke photoPicker.select failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +## Selecting Documents + +1. Import the **FilePicker** module. + + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create a **documentSelectOptions** instance. + + ```ts + const documentSelectOptions = new picker.DocumentSelectOptions(); + ``` + +3. 
Create a **documentViewPicker** instance, and call [**select()**](../reference/apis/js-apis-file-picker.md#select-3) to open the **FilePicker** page for the user to select documents. + After the documents are selected successfully, a result set containing the file URIs is returned. Further operations can be performed on the documents based on the file URIs. + > **NOTE** + > + > Currently, **DocumentSelectOptions** is not configurable. By default, all types of user files are selected. + + ```ts + const documentViewPicker = new picker.DocumentViewPicker(); // Create a documentViewPicker instance. + documentViewPicker.select(documentSelectOptions) + .then((documentSelectResult) => { + let uri = documentSelectResult[0]; + // Perform operations on the documents based on the file URIs. + }) + .catch((err) => { + console.error(`Invoke documentPicker.select failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +## Selecting an Audio File + +1. Import the **FilePicker** module. + + ```ts + import picker from '@ohos.file.picker'; + ``` + +2. Create an **audioSelectOptions** instance. + + ```ts + const audioSelectOptions = new picker.AudioSelectOptions(); + ``` + +3. Create an **audioViewPicker** instance, and call [**select()**](../reference/apis/js-apis-file-picker.md#select-6) to open the **FilePicker** page for the user to select audio files. + + After the files are selected successfully, a result set containing the URIs of the audio files selected is returned. Further operations can be performed on the documents based on the file URIs. + + For example, use the [file management interface](../reference/apis/js-apis-file-fs.md) to obtain the file handle (FD) of the audio clip based on the URI, and then develop the audio playback function based on the media service. For details, see [Audio Playback Development](../media/audio-playback-overview.md). + + > **NOTE** + > + > Currently, **AudioSelectOptions** is not configurable. 
By default, all types of user files are selected. + + ```ts + const audioViewPicker = new picker.AudioViewPicker(); + audioViewPicker.select(audioSelectOptions) + .then(audioSelectResult => { + let uri = audioSelectResult[0]; + // Perform operations on the audio files based on the file URIs. + }) + .catch((err) => { + console.error(`Invoke audioPicker.select failed, code is ${err.code}, message is ${err.message}`); + }) + ``` diff --git a/en/application-dev/file-management/send-file-to-app-sandbox.md b/en/application-dev/file-management/send-file-to-app-sandbox.md new file mode 100644 index 0000000000000000000000000000000000000000..fdcee889e0ce2a8c51fb9db66a0c1d3e414a0d1b --- /dev/null +++ b/en/application-dev/file-management/send-file-to-app-sandbox.md @@ -0,0 +1,46 @@ +# Sending Files to an Application Sandbox + +During the development and debugging process of an application, you may need to place some files to the application sandbox for intra-application access or for testing purposes. In this case, you can use either of the following methods: + +1. Use DevEco Studio to place the files to the application installation directory. For details, see [Application Installation Resource Access](../quick-start/resource-categories-and-access.md#resource-access). + +2. Use the hdc tool to send files to the application sandbox directory on the device. This section describes the second method. + +However, the file directories visible to the debugged process in the hdc shell are different from the application sandbox directories visible to the application. You need to understand the mappings between the application sandbox directories and the physical (real) directories. + +## Mappings Between Application Sandbox Directories and Physical Directories + +The read and write operations performed based on the application sandbox paths via APIs are performed on the files in the physical directories after address conversion. 
The following table lists the mappings between application sandbox paths and physical paths. + +**Table 1** Mapping between application sandbox paths and physical paths + +| Application Sandbox Path| Physical Path in hdc| Description| +| -------- | -------- | -------- | +| /data/storage/el1/bundle | /data/app/el1/bundle/public/<PACKAGENAME> | Application installation package directory.| +| /data/storage/el1/base | /data/app/el1/<USERID>/base/<PACKAGENAME> | Application directory of encryption level (el) 1.| +| /data/storage/el2/base | /data/app/el2/<USERID>/base/<PACKAGENAME> | Application directory of el 2.| +| /data/storage/el1/database | /data/app/el1/<USERID>/database/<PACKAGENAME> | Database directory of the application under **el1/**.| +| /data/storage/el2/database | /data/app/el2/<USERID>/database/<PACKAGENAME> | Database directory of the application under **el2/**.| +| /data/storage/el2/distributedfiles | /mnt/hmdfs/<USERID>/account/merge_view/data/<PACKAGENAME> | Distributed data directory of the application under **el2/**.| + +## Development Example + +The following uses the application bundle **com.ohos.example** as an example. If the application sandbox path is **/data/storage/el1/bundle**, the physical path is **/data/app/el1/bundle/public/**, that is, **/data/app/el1/bundle/public/com.ohos.example**. + +Run the following command to send the file: + +``` +hdc file send ${Path of the local file to send} /data/app/el1/bundle/public/com.ohos.example/ +``` + +## Switching to the Application View + +During the debugging process, if you do not have the permission or the file does not exist, you need to switch from the process view to the application view and further analyze permission and directory problems. To switch to the application view, run the following commands: + +``` +hdc shell // Switch to shell. +ps -ef|grep [hapName] // Obtain the process identifier (PID) of the application. 
+nsenter -t [hapPid] -m /bin/sh // Enter the application sandbox environment based on the PID. +``` + +The application view is in use, and the path you see is the application sandbox path. diff --git a/en/application-dev/file-management/set-security-label.md b/en/application-dev/file-management/set-security-label.md new file mode 100644 index 0000000000000000000000000000000000000000..af819fba397d47f81b0ebe005e67f9e6c8ebef39 --- /dev/null +++ b/en/application-dev/file-management/set-security-label.md @@ -0,0 +1,41 @@ +# Setting the Security Level of a Distributed File + +The security capabilities vary with devices. For example, small embedded devices provide fewer security capabilities than tablets. The security requirements also vary with data. For example, personal health information and bank card information are not expected to be accessed by devices of lower security levels. OpenHarmony provides a complete set of standards for data and device classification and custom data transfer policies for different devices. For details, see [Data Security Labels and Device Security Levels](../database/access-control-by-device-and-data-level.md). + +## Available APIs + +For details about the APIs, see [ohos.file.securityLabel](../reference/apis/js-apis-file-securityLabel.md). + +**Table 1** APIs + +| API| Description| Type| Synchronous Programming| Asynchronous Programming| +| -------- | -------- | -------- | -------- | -------- | +| setSecurityLabel | Sets a security label for a file.| Method| √ | √ | +| getSecurityLabel | Obtains the security label of a file.| Method| √ | √ | + +> **NOTICE** +> +> 1. In distributed networking, a device can view the files that do not match its security level but cannot access them. +> +> 2. The default security level of the distributed file system data is S3. Applications can set the security level of files. + +## Development Example + +Obtain the sandbox path of the file and set the data security label. 
For details about how to obtain the context in the example, see [Obtaining the Context of UIAbility](../application-models/uiability-usage.md#obtaining-the-context-of-uiability). + + +```ts +import securityLabel from '@ohos.file.securityLabel'; + +//Obtain the sandbox path of the file. +let context =...; // Obtain UIAbilityContext information. +let pathDir = context.filesDir; +let filePath = pathDir + '/test.txt'; + +// Set the data level of the file to S0. +securityLabel.setSecurityLabel(filePath, 's0').then(() => { + console.info('Succeeded in setSecurityLabeling.'); +}).catch((err) => { + console.error(`Failed to setSecurityLabel. Code: ${err.code}, message: ${err.message}`); +}); +``` diff --git a/en/application-dev/file-management/share-app-file.md b/en/application-dev/file-management/share-app-file.md new file mode 100644 index 0000000000000000000000000000000000000000..d9ee1d90904f5cdb43cd1987a66b09668200bc81 --- /dev/null +++ b/en/application-dev/file-management/share-app-file.md @@ -0,0 +1,147 @@ +# Sharing an Application File + +The file of an application can be shared with another application based on the file descriptor (FD) or uniform resource identifier (URI) of the file. However, if the FD of a shared file is closed, the file cannot be opened. Therefore, the file sharing based on the FD is not recommended. This section describes how to share an application file based on its URI. + +- You can use **wantConstant.Flags()** of the [ohos.app.ability.wantConstant](../reference/apis/js-apis-app-ability-wantConstant.md#wantconstantflags) module to share an application file in read or read/write mode based on its URI with another application. The target application can use **open()** of the [ohos.file.fs](../reference/apis/js-apis-file-fs.md#fsopen) module to open the URI and then perform read and/or write operations based on the permissions granted. Currently, OpenHarmony API version 9 supports only temporary authorization. 
The permission on shared file is revoked once the target application exits. + +- You can also use **open()** of the ohos.file.fs module to share an application file with the specified permissions to another application based on the FD. After parsing the FD from **Want**, the target application can read and write the file by using **read()** and **write()** APIs of ohos.file.fs. + +You can use the related APIs to [share a file with another application](#sharing-a-file-with-another-application) or [use shared application files](#using-shared-files). + +## File URI Specifications + +The file URIs are in the following format: + + file://<bundleName>/<path>/\#networkid=<networkid> + +- **file**: indicates a file URI. + +- *bundleName*: specifies the owner of the file. + +- *path*: specifies the application sandbox path of the file. + +- *networkid* (optional): specifies the device to which the file belongs in a distributed file system. Leave this parameter unspecified if the file location does not need to be set. + +## Sharing a File with Another Application + +Before sharing application files, you need to [obtain the application file path](../application-models/application-context-stage.md#obtaining-the-application-development-path). + +1. Obtain the application sandbox path of the file and convert it into the file URI. + + ```ts + import UIAbility from '@ohos.app.ability.UIAbility'; + import fileuri from '@ohos.file.fileuri'; + import window from '@ohos.window'; + + export default class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage: window.WindowStage) { + // Obtain the application sandbox path of the file. + let pathInSandbox = this.context.filesDir + "/test.txt"; + // Convert the application sandbox path into a URI. + let uri = fileuri.getUriFromPath(pathInSandbox); + // The obtained URI is file://com.example.demo/data/storage/el2/base/files/test.txt. + } + } + ``` + +2. 
Set the target application, with which you want to share the file, and grant permissions on the file. + Use [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to share the file with the target application. You need to pass in the URI obtained in **uri** of the **want** parameter, set the type of the file to share, set **action** to **ohos.want.action.sendData**, and set the granted permission on the file in **flags**. For details, see [Want](../reference/apis/js-apis-app-ability-want.md#attributes). + + > **NOTE** + > + > The write permission granted includes the read permission. + + ```ts + import fileuri from '@ohos.file.fileuri'; + import window from '@ohos.window'; + import wantConstant from '@ohos.app.ability.wantConstant'; + import UIAbility from '@ohos.app.ability.UIAbility'; + + export default class EntryAbility extends UIAbility { + onWindowStageCreate(windowStage: window.WindowStage) { + // Obtain the application sandbox path of the file. + let filePath = this.context.filesDir + '/test.txt'; + // Convert the application sandbox path into a URI. + let uri = fileuri.getUriFromPath(filePath); + let want = { + // Grant the read and write permissions on the shared file to the target application. + flags: wantConstant.Flags.FLAG_AUTH_WRITE_URI_PERMISSION | wantConstant.Flags.FLAG_AUTH_READ_URI_PERMISSION, + // Set the implicit startup rule for the application that shares the file. + action: 'ohos.want.action.sendData', + uri: uri, + type: 'text/plain' + } + this.context.startAbility(want) + .then(() => { + console.info('Invoke getCurrentBundleStats succeeded.'); + }) + .catch((err) => { + console.error(`Invoke startAbility failed, code is ${err.code}, message is ${err.message}`); + }); + } + + ... 
+ } + ``` + +## Using Shared Files + +In the [**module.json5** file](../quick-start/module-configuration-file.md) of the application, which wants to use the shared file, set **actions** to **ohos.want.action.sendData** to allow the application to receive files shared by another application and set **uris** to the type of the URI to receive. In the following example, the application receives only .txt files with **scheme** of **file**. + +```json +{ + "module": { + ... + "abilities": [ + { + ... + "skills": [ + { + ... + "actions": [ + "ohos.want.action.sendData" + ], + "uris": [ + { + "scheme": "file", + "type": "text/plain" + } + ] + } + ] + } + ] + } +} +``` + +After **UIAbility** of the application starts, the application obtains **want** information from [**onCreate()**](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) or [**onNewWant()**](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonnewwant). + +After obtaining the URI of the shared file through **want**, the application can call **fs.open()** to open the file, and then read and write the file after obtaining the related file object. + +```ts +// xxx.ets +import fs from '@ohos.file.fs'; + +function getShareFile() { + try { + let want =...; // Obtain the want information sent from the application that shares the file. + + // Obtain the uri field from the want information. + let uri = want.uri; + if (uri == null || uri == undefined) { + console.info('uri is invalid'); + return; + } + try { + // Perform operations on the URI of the shared file as required. For example, open the URI to obtain the file object in read/write mode. 
+ let file = fs.openSync(uri, fs.OpenMode.READ_WRITE); + console.info('open file successfully!'); + } catch (error) { + console.error(`Invoke openSync failed, code is ${error.code}, message is ${error.message}`); + } + } catch (error) { + console.error(`Invoke openSync failed, code is ${error.code}, message is ${error.message}`); + } +} +``` diff --git a/en/application-dev/file-management/user-file-overview.md b/en/application-dev/file-management/user-file-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..599da1024166be5c00619aa71f19f6f10fd73f7f --- /dev/null +++ b/en/application-dev/file-management/user-file-overview.md @@ -0,0 +1,60 @@ +# User File Overview + +User files are the private images, video and audio clips, and documents of the user who logs in to the device. + +1. User files are stored in a directory, whose owner is the user who logs in to the device. + +2. User files can be stored in [built-in storage](#built-in-storage) and [external storage](#external-storage). + +3. An application cannot access user files without user authorization, or the operations on user files must be performed by the user. + +OpenHarmony provides the [user file access framework](#user-file-access-framework) for developers to access and manage user files, which will be described in detail below. + +## User File Storage + +### Built-in Storage + +Built-in storage refers to the internal storage device (space) of a device. The built-in storage device cannot be removed. The following files can be stored in the built-in storage of a device: + +- Files owned by a user: The files belong to the user who logs in to the device. Different users who log in to a device can view only their own files. 
These user files can be classified into the following types based on file attributes and access habits of users/applications: + - Image/Video files + The files have attributes, such as the shooting time, location, rotation angle, and file width and height information, and are stored in media file formats. The files are usually presented as media files or albums, without the specific location in the system. + + - Audio files + The files have attributes, such as the album, creator, and shooting duration information, and are stored in media file formats. Generally, the files are presented by file, album, or creator, without the specific location in the system. + + - Documents + The files are stored as common files, including common text files, compressed files, and images, videos and audio clips stored as common files. These files are presented in a directory tree. + +- Files shared by users: The files are stored in a directory for sharing and shared by multiple users. + The files in the shared directory are stored as common files and presented in a directory tree. + +### External Storage + +External storage is not inside a device's main storage or memory. Common external storage devices include pluggable devices, such as SD cards and USB flash drives. Same as the files in the shared directory of the built-in storage device, the files in an external storage device can be viewed by all the users who log in to the system. + +External storage devices are pluggable. OpenHarmony provides the functions of listening for the device insertion and removal events and mounting/unmounting an external storage device. For details, see [Managing External Storage Devices](manage-external-storage.md). + +The files on external storage devices are presented as common files in a directory tree, like the documents stored in built-in storage. + +## User File Access Framework + +OpenHarmony provides the user file access framework for developers to access and manage user files. 
This framework leverages the ExtensionAbility of OpenHarmony to provide a set of methods and interfaces for accessing user files. + +**Figure 1** User file access framework +![User file access framework](figures/user-file-access-framework.png) + +- To access user files, for example, select a photo or save multiple documents, a system application or third-party application (file access client in **Figure 1**) starts the **FilePicker** application. + +- OpenHarmony is prebuilt with the **FilePicker** and **FileManager** applications. + - **FilePicker**: provides APIs for a file access client to select and save user files without any permission. For details, see [Selecting User Files](select-user-file.md). + - **FileManager**: allows users to view and modify files, and delete, rename, move, and create files or directories by using a system FileManager. + + You can also develop your own FilePicker or FileManager applications as required. FilePicker is a subset of FileManager. For details about how to develop a FileManager application, see [Developing a FileManager Application](dev-user-file-manager.md). + +- The user file access framework provides the following functional modules: + - **File Access Helper**: provides APIs for the **FileManager** and **FilePicker** to access user files. + - **File Access ExtensionAbility**: provides a file access framework to implement file access functions. The **File Access ExtensionAbility** consists of the following: + - **UserFileManager**: implements management of the files stored on the built-in storage. + - **ExternalFileManager**: implements management of the files stored on the external storage. 
diff --git a/en/application-dev/media/Readme-EN.md b/en/application-dev/media/Readme-EN.md index f6902595cadbea27765ebf1812544821b3c68a09..efc78832291fda395506dc0864af4fae0f068621 100755 --- a/en/application-dev/media/Readme-EN.md +++ b/en/application-dev/media/Readme-EN.md @@ -1,29 +1,60 @@ # Media +- [Media Application Overview](media-application-overview.md) - Audio and Video - - [Audio Overview](audio-overview.md) - - [Audio Rendering Development](audio-renderer.md) - - [Audio Stream Management Development](audio-stream-manager.md) - - [Audio Capture Development](audio-capturer.md) - - [OpenSL ES Audio Playback Development](opensles-playback.md) - - [OpenSL ES Audio Recording Development](opensles-capture.md) - - [Audio Interruption Mode Development](audio-interruptmode.md) - - [Volume Management Development](audio-volume-manager.md) - - [Audio Routing and Device Management Development](audio-routing-manager.md) - - [AVPlayer Development (Recommended)](avplayer-playback.md) - - [AVRecorder Development (Recommended)](avrecorder.md) - - [Audio Playback Development (To Be Deprecated Soon)](audio-playback.md) - - [Audio Recording Development (To Be Deprecated Soon)](audio-recorder.md) - - [Video Playback Development (To Be Deprecated Soon)](video-playback.md) - - [Video Recording Development (To Be Deprecated Soon)](video-recorder.md) - -- AVSession + - [Audio and Video Overview](av-overview.md) + - [AVPlayer and AVRecorder](avplayer-avrecorder-overview.md) + - Audio Playback + - [Audio Playback Overview](audio-playback-overview.md) + - [Using AVPlayer for Audio Playback](using-avplayer-for-playback.md) + - [Using AudioRenderer for Audio Playback](using-audiorenderer-for-playback.md) + - [Using OpenSL ES for Audio Playback](using-opensl-es-for-playback.md) + - [Using TonePlayer for Audio Playback (for System Applications Only)](using-toneplayer-for-playback.md) + - [Audio Playback Concurrency Policy](audio-playback-concurrency.md) + - [Volume 
Management](volume-management.md) + - [Audio Playback Stream Management](audio-playback-stream-management.md) + - [Audio Output Device Management](audio-output-device-management.md) + - [Distributed Audio Playback (for System Applications Only)](distributed-audio-playback.md) + - Audio Recording + - [Audio Recording Overview](audio-recording-overview.md) + - [Using AVRecorder for Audio Recording](using-avrecorder-for-recording.md) + - [Using AudioCapturer for Audio Recording](using-audiocapturer-for-recording.md) + - [Using OpenSL ES for Audio Recording](using-opensl-es-for-recording.md) + - [Microphone Management](mic-management.md) + - [Audio Recording Stream Management](audio-recording-stream-management.md) + - [Audio Input Device Management](audio-input-device-management.md) + - Audio Call + - [Audio Call Overview](audio-call-overview.md) + - [Developing Audio Call](audio-call-development.md) + - [Video Playback](video-playback.md) + - [Video Recording](video-recording.md) +- AVSession (for System Applications Only) - [AVSession Overview](avsession-overview.md) - - [AVSession Development](avsession-guidelines.md) - + - Local AVSession + - [Local AVSession Overview](local-avsession-overview.md) + - [AVSession Provider](using-avsession-developer.md) + - [AVSession Controller](using-avsession-controller.md) + - Distributed AVSession + - [Distributed AVSession Overview](distributed-avsession-overview.md) + - [Using Distributed AVSession](using-distributed-avsession.md) +- Camera (for System Applications Only) + - [Camera Overview](camera-overview.md) + - Camera Development + - [Camera Development Preparations](camera-preparation.md) + - [Device Input Management](camera-device-input.md) + - [Session Management](camera-session-management.md) + - [Camera Preview](camera-preview.md) + - [Camera Photographing](camera-shooting.md) + - [Camera Recording](camera-recording.md) + - [Camera Metadata](camera-metadata.md) + - Best Practices + - [Camera Photographing 
Sample](camera-shooting-case.md) + - [Camera Recording Sample](camera-recording-case.md) - Image - - [Image Development](image.md) - -- Camera - - [Camera Development](camera.md) - - [Distributed Camera Development](remote-camera.md) + - [Image Overview](image-overview.md) + - [Image Decoding](image-decoding.md) + - Image Processing + - [Image Transformation](image-transformation.md) + - [Pixel Map Operation](image-pixelmap-operation.md) + - [Image Encoding](image-encoding.md) + - [Image Tool](image-tool.md) diff --git a/en/application-dev/media/audio-call-development.md b/en/application-dev/media/audio-call-development.md new file mode 100644 index 0000000000000000000000000000000000000000..8234c837c2ce985c2a1a7dc91c7e0002fb3d4a69 --- /dev/null +++ b/en/application-dev/media/audio-call-development.md @@ -0,0 +1,259 @@ +# Developing Audio Call + +During an audio call, audio output (playing the peer voice) and audio input (recording the local voice) are carried out simultaneously. You can use the AudioRenderer to implement audio output and the AudioCapturer to implement audio input. + +Before starting or stopping using the audio call service, the application needs to check the [audio scene](audio-call-overview.md#audio-scene) and [ringer mode](audio-call-overview.md#ringer-mode) to adopt proper audio management and prompt policies. + +The sample code below demonstrates the basic process of using the AudioRenderer and AudioCapturer to implement the audio call service, without the process of call data transmission. In actual development, the peer call data transmitted over the network needs to be decoded and played, and the sample code uses the process of reading an audio file instead; the local call data needs to be encoded and packed and then sent to the peer over the network, and the sample code uses the process of writing an audio file instead. 
+ +## Using AudioRenderer to Play the Peer Voice + +This process is similar to the process of [using AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md). The key differences lie in the **audioRendererInfo** parameter and audio data source. In the **audioRendererInfo** parameter used for audio calling, **content** must be set to **CONTENT_TYPE_SPEECH**, and **usage** must be set to **STREAM_USAGE_VOICE_COMMUNICATION**. + +```ts +import audio from '@ohos.multimedia.audio'; +import fs from '@ohos.file.fs'; +const TAG = 'VoiceCallDemoForAudioRenderer'; +// The process is similar to the process of using AudioRenderer to develop audio playback. The key differences lie in the audioRendererInfo parameter and audio data source. +export default class VoiceCallDemoForAudioRenderer { + private renderModel = undefined; + private audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate. + channels: audio.AudioChannel.CHANNEL_2, // Channel. + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format. + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format. + } + private audioRendererInfo = { + // Parameters corresponding to the call scenario need to be used. + content: audio.ContentType.CONTENT_TYPE_SPEECH, // Audio content type: speech. + usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // Audio stream usage type: voice communication. + rendererFlags: 0 // AudioRenderer flag. The default value is 0. + } + private audioRendererOptions = { + streamInfo: this.audioStreamInfo, + rendererInfo: this.audioRendererInfo + } + // Create an AudioRenderer instance, and set the events to listen for. + init() { + audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // Create an AudioRenderer instance. 
+ if (!err) { + console.info(`${TAG}: creating AudioRenderer success`); + this.renderModel = renderer; + this.renderModel.on('stateChange', (state) => { // Set the events to listen for. A callback is invoked when the AudioRenderer is switched to the specified state. + if (state == 1) { + console.info('audio renderer state is: STATE_PREPARED'); + } + if (state == 2) { + console.info('audio renderer state is: STATE_RUNNING'); + } + }); + this.renderModel.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of rendered frames reaches 1000. + if (position == 1000) { + console.info('ON Triggered successfully'); + } + }); + } else { + console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`); + } + }); + } + // Start audio rendering. + async start() { + let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED]; + if (stateGroup.indexOf(this.renderModel.state) === -1) { // Rendering can be started only when the AudioRenderer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state. + console.error(TAG + 'start failed'); + return; + } + await this.renderModel.start(); // Start rendering. + const bufferSize = await this.renderModel.getBufferSize(); + // The process of reading audio file data is used as an example. In actual audio call development, audio data transmitted from the peer needs to be read. + let context = getContext(this); + let path = context.filesDir; + + const filePath = path + '/voice_call_data.wav'; // Sandbox path. The actual path is /data/storage/el2/base/haps/entry/files/voice_call_data.wav. + let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); + let stat = await fs.stat(filePath); + let buf = new ArrayBuffer(bufferSize); + let len = stat.size % bufferSize === 0 ? 
Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1); + for (let i = 0; i < len; i++) { + let options = { + offset: i * bufferSize, + length: bufferSize + }; + let readsize = await fs.read(file.fd, buf, options); + // buf indicates the audio data to be written to the buffer. Before calling AudioRenderer.write(), you can preprocess the audio data for personalized playback. The AudioRenderer reads the audio data written to the buffer for rendering. + let writeSize = await new Promise((resolve, reject) => { + this.renderModel.write(buf, (err, writeSize) => { + if (err) { + reject(err); + } else { + resolve(writeSize); + } + }); + }); + if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // The rendering stops if the AudioRenderer is in the STATE_RELEASED state. + fs.close(file); + await this.renderModel.stop(); + } + if (this.renderModel.state === audio.AudioState.STATE_RUNNING) { + if (i === len - 1) { // The rendering stops if the file finishes reading. + fs.close(file); + await this.renderModel.stop(); + } + } + } + } + // Pause the rendering. + async pause() { + // Rendering can be paused only when the AudioRenderer is in the STATE_RUNNING state. + if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) { + console.info('Renderer is not running'); + return; + } + await this.renderModel.pause(); // Pause rendering. + if (this.renderModel.state === audio.AudioState.STATE_PAUSED) { + console.info('Renderer is paused.'); + } else { + console.error('Pausing renderer failed.'); + } + } + // Stop rendering. + async stop() { + // Rendering can be stopped only when the AudioRenderer is in the STATE_RUNNING or STATE_PAUSED state. + if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) { + console.info('Renderer is not running or paused.'); + return; + } + await this.renderModel.stop(); // Stop rendering. 
+ if (this.renderModel.state === audio.AudioState.STATE_STOPPED) { + console.info('Renderer stopped.'); + } else { + console.error('Stopping renderer failed.'); + } + } + // Release the instance. + async release() { + // The AudioRenderer can be released only when it is not in the STATE_RELEASED state. + if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { + console.info('Renderer already released'); + return; + } + await this.renderModel.release(); // Release the instance. + if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { + console.info('Renderer released'); + } else { + console.error('Renderer release failed.'); + } + } +} +``` + +## Using AudioCapturer to Record the Local Voice + +This process is similar to the process of [using AudioCapturer to develop audio recording](using-audiocapturer-for-recording.md). The key differences lie in the **audioCapturerInfo** parameter and audio data stream direction. In the **audioCapturerInfo** parameter used for audio calling, **source** must be set to **SOURCE_TYPE_VOICE_COMMUNICATION**. + +```ts +import audio from '@ohos.multimedia.audio'; +import fs from '@ohos.file.fs'; +const TAG = 'VoiceCallDemoForAudioCapturer'; +// The process is similar to the process of using AudioCapturer to develop audio recording. The key differences lie in the audioCapturerInfo parameter and audio data stream direction. +export default class VoiceCallDemoForAudioCapturer { + private audioCapturer = undefined; + private audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // Sampling rate. + channels: audio.AudioChannel.CHANNEL_1, // Channel. + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format. + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format. + } + private audioCapturerInfo = { + // Parameters corresponding to the call scenario need to be used. 
+ source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // Audio source type: voice communication. + capturerFlags: 0 // AudioCapturer flag. The default value is 0. + } + private audioCapturerOptions = { + streamInfo: this.audioStreamInfo, + capturerInfo: this.audioCapturerInfo + } + // Create an AudioCapturer instance, and set the events to listen for. + init() { + audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // Create an AudioCapturer instance. + if (err) { + console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`); + return; + } + console.info(`${TAG}: create AudioCapturer success`); + this.audioCapturer = capturer; + this.audioCapturer.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of captured frames reaches 1000. + if (position === 1000) { + console.info('ON Triggered successfully'); + } + }); + this.audioCapturer.on('periodReach', 2000, (position) => { // Subscribe to the periodReach event. A callback is triggered when the number of captured frames reaches 2000. + if (position === 2000) { + console.info('ON Triggered successfully'); + } + }); + }); + } + // Start audio recording. + async start() { + let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED]; + if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // Recording can be started only when the AudioRenderer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state. + console.error(`${TAG}: start failed`); + return; + } + await this.audioCapturer.start(); // Start recording. + // The following describes how to write audio data to a file. In actual audio call development, the local audio data needs to be encoded and packed, and then sent to the peer through the network. 
+ let context = getContext(this); + const path = context.filesDir + '/voice_call_data.wav'; // Path for storing the recorded audio file. + let file = fs.openSync(path, 0o2 | 0o100); // Create the file if it does not exist. + let fd = file.fd; + let numBuffersToCapture = 150; // Write data for 150 times. + let count = 0; + while (numBuffersToCapture) { + let bufferSize = await this.audioCapturer.getBufferSize(); + let buffer = await this.audioCapturer.read(bufferSize, true); + let options = { + offset: count * bufferSize, + length: bufferSize + }; + if (buffer === undefined) { + console.error(`${TAG}: read buffer failed`); + } else { + let number = fs.writeSync(fd, buffer, options); + console.info(`${TAG}: write date: ${number}`); + } + numBuffersToCapture--; + count++; + } + } + // Stop recording. + async stop() { + // The AudioCapturer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state. + if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) { + console.info('Capturer is not running or paused'); + return; + } + await this.audioCapturer.stop(); // Stop recording. + if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) { + console.info('Capturer stopped'); + } else { + console.error('Capturer stop failed'); + } + } + // Release the instance. + async release() { + // The AudioCapturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state. + if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) { + console.info('Capturer already released'); + return; + } + await this.audioCapturer.release(); // Release the instance. 
+ if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) { + console.info('Capturer released'); + } else { + console.error('Capturer release failed'); + } + } +} +``` diff --git a/en/application-dev/media/audio-call-overview.md b/en/application-dev/media/audio-call-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..1462198c201203da3eecc902de556c005ad3aae9 --- /dev/null +++ b/en/application-dev/media/audio-call-overview.md @@ -0,0 +1,49 @@ +# Audio Call Development + +Typically, audio calls are classified into VoIP calls and cellular calls. + +- Voice over Internet Protocol (VoIP) is a technology that enables you to make voice calls using a broadband Internet connection. During a VoIP call, call information is packed into data packets and transmitted over the network. Therefore, the VoIP call has high requirements on the network quality, and the call quality is closely related to the network connection speed. + +- Cellular call refers to the traditional telephony service provided by carriers. Currently, APIs for developing cellular calling are available only for system applications. + +When developing the audio call service, you must use a proper audio processing policy based on the [audio scene](#audio-scene) and [ringer mode](#ringer-mode). + +## Audio Scene + +When an application uses the audio call service, the system switches to the call-related audio scene (specified by [AudioScene](../reference/apis/js-apis-audio.md#audioscene8)). The system has preset multiple audio scenes, including ringing, cellular call, and voice chat, and uses a scene-specific policy to process audio. + +For example, in the cellular call audio scene, the system prioritizes voice clarity. To deliver a crystal clear voice during calls, the system uses the 3A algorithm to preprocess audio data, suppress echoes, eliminate background noise, and adjust the volume range. 
The 3A algorithm refers to three audio processing algorithms: Acoustic Echo Cancellation (AEC), Active Noise Control (ANC), and Automatic Gain Control (AGC). + +Currently, the following audio scenes are preset: + +- **AUDIO_SCENE_DEFAULT**: default audio scene, which can be used in all scenarios except audio calls. + +- **AUDIO_SCENE_RINGING**: ringing audio scene, which is used when a call is coming and is open only to system applications. + +- **AUDIO_SCENE_PHONE_CALL**: cellular call audio scene, which is used for cellular calls and is open only to system applications. + +- **AUDIO_SCENE_VOICE_CHAT**: voice chat scene, which is used for VoIP calls. + +The application can call **getAudioScene** in the [AudioManager](../reference/apis/js-apis-audio.md#audiomanager) class to obtain the audio scene in use. Before starting or stopping using the audio call service, the application can call this API to check whether the system has switched to the suitable audio scene. + +## Ringer Mode + +When an audio call is coming, the application notifies the user by playing a ringtone or vibrating, depending on the setting of [AudioRingMode](../reference/apis/js-apis-audio.md#audioringmode). + +The system has preset the following ringer modes: + +- **RINGER_MODE_SILENT**: silent mode, in which no sound is played when a call is coming in. + +- **RINGER_MODE_VIBRATE**: vibration mode, in which no sound is played but the device vibrates when a call is coming in. + +- **RINGER_MODE_NORMAL**: normal mode, in which a ringtone is played when a call is coming in. + +The application can call **getRingerMode** in the [AudioVolumeGroupManager](../reference/apis/js-apis-audio.md#audiovolumegroupmanager9) class to obtain the ringer mode in use so as to use a proper policy to notify the user. + +If the application wants to obtain the ringer mode changes in time, it can call **on('ringerModeChange')** in the **AudioVolumeGroupManager** class to listen for the changes. 
When the ringer mode changes, it will receive a notification and can make adjustment accordingly. + +## Audio Device Switching During a Call + +When a call is coming, the system selects an appropriate audio device based on the default priority. The application can switch the call to another audio device as required. + +The audio devices that can be used for the audio call are specified by [CommunicationDeviceType](../reference/apis/js-apis-audio.md#communicationdevicetype9). The application can call **isCommunicationDeviceActive** in the [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9) class to check whether a communication device is active. It can also call **setCommunicationDevice** in the **AudioRoutingManager** class to set a communication device to the active state so that the device can be used for the call. diff --git a/en/application-dev/media/audio-capturer.md b/en/application-dev/media/audio-capturer.md deleted file mode 100644 index f7b01ce2a387af3471b297de329fe3267b9e9785..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-capturer.md +++ /dev/null @@ -1,258 +0,0 @@ -# Audio Capture Development - -## Introduction - -You can use the APIs provided by **AudioCapturer** to record raw audio files, thereby implementing audio data collection. - -**Status check**: During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the **AudioCapturer** instance. This is because some operations can be performed only when the audio capturer is in a given state. If the application performs an operation when the audio capturer is not in the given state, the system may throw an exception or generate other undefined behavior. - -## Working Principles - -This following figure shows the audio capturer state transitions. 
- -**Figure 1** Audio capturer state transitions - -![audio-capturer-state](figures/audio-capturer-state.png) - -- **PREPARED**: The audio capturer enters this state by calling **create()**. -- **RUNNING**: The audio capturer enters this state by calling **start()** when it is in the **PREPARED** state or by calling **start()** when it is in the **STOPPED** state. -- **STOPPED**: The audio capturer in the **RUNNING** state can call **stop()** to stop playing audio data. -- **RELEASED**: The audio capturer in the **PREPARED** or **STOPPED** state can use **release()** to release all occupied hardware and software resources. It will not transit to any other state after it enters the **RELEASED** state. - -## Constraints - -Before developing the audio data collection feature, configure the **ohos.permission.MICROPHONE** permission for your application. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - -## How to Develop - -For details about the APIs, see [AudioCapturer in Audio Management](../reference/apis/js-apis-audio.md#audiocapturer8). - -1. Use **createAudioCapturer()** to create a global **AudioCapturer** instance. - - Set parameters of the **AudioCapturer** instance in **audioCapturerOptions**. This instance is used to capture audio, control and obtain the recording state, and register a callback for notification. - - ```js - import audio from '@ohos.multimedia.audio'; - import fs from '@ohos.file.fs'; // It will be used for the call of the read function in step 3. - - // Perform a self-test on APIs related to audio rendering. - @Entry - @Component - struct AudioRenderer { - @State message: string = 'Hello World' - private audioCapturer: audio.AudioCapturer; // It will be called globally. 
- - async initAudioCapturer(){ - let audioStreamInfo = { - samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, - channels: audio.AudioChannel.CHANNEL_1, - sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, - encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW - } - - let audioCapturerInfo = { - source: audio.SourceType.SOURCE_TYPE_MIC, - capturerFlags: 0 // 0 is the extended flag bit of the audio capturer. The default value is 0. - } - - let audioCapturerOptions = { - streamInfo: audioStreamInfo, - capturerInfo: audioCapturerInfo - } - - this.audioCapturer = await audio.createAudioCapturer(audioCapturerOptions); - console.log('AudioRecLog: Create audio capturer success.'); - } - - ``` - -2. Use **start()** to start audio recording. - - The capturer state will be **STATE_RUNNING** once the audio capturer is started. The application can then begin reading buffers. - - ```js - async startCapturer() { - let state = this.audioCapturer.state; - // The audio capturer should be in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state after being started. - if (state == audio.AudioState.STATE_PREPARED || state == audio.AudioState.STATE_PAUSED || - state == audio.AudioState.STATE_STOPPED) { - await this.audioCapturer.start(); - state = this.audioCapturer.state; - if (state == audio.AudioState.STATE_RUNNING) { - console.info('AudioRecLog: Capturer started'); - } else { - console.error('AudioRecLog: Capturer start failed'); - } - } - } - ``` - -3. Read the captured audio data and convert it to a byte stream. Call **read()** repeatedly to read the data until the application stops the recording. - - The following example shows how to write recorded data into a file. - - ```js - async readData(){ - let state = this.audioCapturer.state; - // The read operation can be performed only when the state is STATE_RUNNING. 
- if (state != audio.AudioState.STATE_RUNNING) { - console.info('Capturer is not in a correct state to read'); - return; - } - const path = '/data/data/.pulse_dir/capture_js.wav'; // Path for storing the collected audio file. - let file = fs.openSync(path, 0o2); - let fd = file.fd; - if (file !== null) { - console.info('AudioRecLog: file created'); - } else { - console.info('AudioRecLog: file create : FAILED'); - return; - } - if (fd !== null) { - console.info('AudioRecLog: file fd opened in append mode'); - } - let numBuffersToCapture = 150; // Write data for 150 times. - let count = 0; - while (numBuffersToCapture) { - this.bufferSize = await this.audioCapturer.getBufferSize(); - let buffer = await this.audioCapturer.read(this.bufferSize, true); - let options = { - offset: count * this.bufferSize, - length: this.bufferSize - } - if (typeof(buffer) == undefined) { - console.info('AudioRecLog: read buffer failed'); - } else { - let number = fs.writeSync(fd, buffer, options); - console.info(`AudioRecLog: data written: ${number}`); - } - numBuffersToCapture--; - count++; - } - } - ``` - -4. Once the recording is complete, call **stop()** to stop the recording. - - ```js - async StopCapturer() { - let state = this.audioCapturer.state; - // The audio capturer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state. - if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) { - console.info('AudioRecLog: Capturer is not running or paused'); - return; - } - - await this.audioCapturer.stop(); - - state = this.audioCapturer.state; - if (state == audio.AudioState.STATE_STOPPED) { - console.info('AudioRecLog: Capturer stopped'); - } else { - console.error('AudioRecLog: Capturer stop failed'); - } - } - ``` - -5. After the task is complete, call **release()** to release related resources. 
- - ```js - async releaseCapturer() { - let state = this.audioCapturer.state; - // The audio capturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state. - if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) { - console.info('AudioRecLog: Capturer already released'); - return; - } - - await this.audioCapturer.release(); - - state = this.audioCapturer.state; - if (state == audio.AudioState.STATE_RELEASED) { - console.info('AudioRecLog: Capturer released'); - } else { - console.info('AudioRecLog: Capturer release failed'); - } - } - ``` - -6. (Optional) Obtain the audio capturer information. - - You can use the following code to obtain the audio capturer information: - - ```js - async getAudioCapturerInfo(){ - // Obtain the audio capturer state. - let state = this.audioCapturer.state; - // Obtain the audio capturer information. - let audioCapturerInfo : audio.AudioCapturerInfo = await this.audioCapturer.getCapturerInfo(); - // Obtain the audio stream information. - let audioStreamInfo : audio.AudioStreamInfo = await this.audioCapturer.getStreamInfo(); - // Obtain the audio stream ID. - let audioStreamId : number = await this.audioCapturer.getAudioStreamId(); - // Obtain the Unix timestamp, in nanoseconds. - let audioTime : number = await this.audioCapturer.getAudioTime(); - // Obtain a proper minimum buffer size. - let bufferSize : number = await this.audioCapturer.getBufferSize(); - } - ``` - -7. (Optional) Use **on('markReach')** to subscribe to the mark reached event, and use **off('markReach')** to unsubscribe from the event. - - After the mark reached event is subscribed to, when the number of frames collected by the audio capturer reaches the specified value, a callback is triggered and the specified value is returned. 
- - ```js - async markReach(){ - this.audioCapturer.on('markReach', 10, (reachNumber) => { - console.info('Mark reach event Received'); - console.info(`The Capturer reached frame: ${reachNumber}`); - }); - this.audioCapturer.off('markReach'); // Unsubscribe from the mark reached event. This event will no longer be listened for. - } - ``` - -8. (Optional) Use **on('periodReach')** to subscribe to the period reached event, and use **off('periodReach')** to unsubscribe from the event. - - After the period reached event is subscribed to, each time the number of frames collected by the audio capturer reaches the specified value, a callback is triggered and the specified value is returned. - - ```js - async periodReach(){ - this.audioCapturer.on('periodReach', 10, (reachNumber) => { - console.info('Period reach event Received'); - console.info(`In this period, the Capturer reached frame: ${reachNumber}`); - }); - this.audioCapturer.off('periodReach'); // Unsubscribe from the period reached event. This event will no longer be listened for. - } - ``` - -9. If your application needs to perform some operations when the audio capturer state is updated, it can subscribe to the state change event. When the audio capturer state is updated, the application receives a callback containing the event type. 
- - ```js - async stateChange(){ - this.audioCapturer.on('stateChange', (state) => { - console.info(`AudioCapturerLog: Changed State to : ${state}`) - switch (state) { - case audio.AudioState.STATE_PREPARED: - console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------'); - console.info('Audio State is : Prepared'); - break; - case audio.AudioState.STATE_RUNNING: - console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------'); - console.info('Audio State is : Running'); - break; - case audio.AudioState.STATE_STOPPED: - console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------'); - console.info('Audio State is : stopped'); - break; - case audio.AudioState.STATE_RELEASED: - console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------'); - console.info('Audio State is : released'); - break; - default: - console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------'); - console.info('Audio State is : invalid'); - break; - } - }); - } - ``` diff --git a/en/application-dev/media/audio-input-device-management.md b/en/application-dev/media/audio-input-device-management.md new file mode 100644 index 0000000000000000000000000000000000000000..ebdadfaad7a9316cf055d3216ac3a94a1b052a33 --- /dev/null +++ b/en/application-dev/media/audio-input-device-management.md @@ -0,0 +1,88 @@ +# Audio Input Device Management + +If a device is connected to multiple audio input devices, you can use **AudioRoutingManager** to specify an audio input device to record audio. For details about the API reference, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9). + +## Creating an AudioRoutingManager Instance + +Before using **AudioRoutingManager** to manage audio devices, import the audio module and create an **AudioManager** instance. + +```ts +import audio from '@ohos.multimedia.audio'; // Import the audio module. 
+ +let audioManager = audio.getAudioManager(); // Create an AudioManager instance. + +let audioRoutingManager = audioManager.getRoutingManager(); // Call an API of AudioManager to create an AudioRoutingManager instance. +``` + +## Supported Audio Input Device Types + +The table below lists the supported audio input devices. + +| Name| Value| Description| +| -------- | -------- | -------- | +| WIRED_HEADSET | 3 | Wired headset with a microphone.| +| BLUETOOTH_SCO | 7 | Bluetooth device using Synchronous Connection Oriented (SCO) links.| +| MIC | 15 | Microphone.| +| USB_HEADSET | 22 | USB Type-C headset.| + +## Obtaining Input Device Information + +Use **getDevices()** to obtain information about all the input devices. + +```ts +audioRoutingManager.getDevices(audio.DeviceFlag.INPUT_DEVICES_FLAG).then((data) => { + console.info('Promise returned to indicate that the device list is obtained.'); +}); +``` + +## Listening for Device Connection State Changes + +Set a listener to listen for changes of the device connection state. When a device is connected or disconnected, a callback is triggered. + +```ts +// Listen for connection state changes of audio devices. +audioRoutingManager.on('deviceChange', audio.DeviceFlag.INPUT_DEVICES_FLAG, (deviceChanged) => { + console.info('device change type: ' + deviceChanged.type); // Device connection state change. The value 0 means that the device is connected and 1 means that the device is disconnected. + console.info('device descriptor size : ' + deviceChanged.deviceDescriptors.length); + console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceRole); // Device role. + console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceType); // Device type. +}); + +// Cancel the listener for the connection state changes of audio devices. 
+audioRoutingManager.off('deviceChange', (deviceChanged) => { + console.info('Should be no callback.'); +}); +``` + +## Selecting an Audio Input Device (for System Applications only) + +Currently, only one input device can be selected, and the device ID is used as the unique identifier. For details about audio device descriptors, see [AudioDeviceDescriptors](../reference/apis/js-apis-audio.md#audiodevicedescriptors). + +> **NOTE** +> +> The user can connect to a group of audio devices (for example, a pair of Bluetooth headsets), but the system treats them as one device (a group of devices that share the same device ID). + +```ts +let inputAudioDeviceDescriptor = [{ + deviceRole : audio.DeviceRole.INPUT_DEVICE, + deviceType : audio.DeviceType.EARPIECE, + id : 1, + name : "", + address : "", + sampleRates : [44100], + channelCounts : [2], + channelMasks : [0], + networkId : audio.LOCAL_NETWORK_ID, + interruptGroupId : 1, + volumeGroupId : 1, +}]; + +async function getRoutingManager(){ + audioRoutingManager.selectInputDevice(inputAudioDeviceDescriptor).then(() => { + console.info('Invoke selectInputDevice succeeded.'); + }).catch((err) => { + console.error(`Invoke selectInputDevice failed, code is ${err.code}, message is ${err.message}`); + }); +} + +``` diff --git a/en/application-dev/media/audio-interruptmode.md b/en/application-dev/media/audio-interruptmode.md deleted file mode 100644 index 48a53bf5d5990ac88aae1271466a6aa36d52ac98..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-interruptmode.md +++ /dev/null @@ -1,55 +0,0 @@ -# Audio Interruption Mode Development - -## Introduction -The audio interruption mode is used to control the playback of multiple audio streams. - -Audio applications can set the audio interruption mode to independent or shared under **AudioRenderer**. - -In shared mode, multiple audio streams share one session ID. In independent mode, each audio stream has an independent session ID. 
- -**Asynchronous operation**: To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the promise functions. - -## How to Develop - -For details about the APIs, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8). - -1. Use **createAudioRenderer()** to create an **AudioRenderer** instance. - - Set parameters of the **AudioRenderer** instance in **audioRendererOptions**. - - This instance is used to render audio, control and obtain the rendering status, and register a callback for notification. - -```js - import audio from '@ohos.multimedia.audio'; - - var audioStreamInfo = { - samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, - channels: audio.AudioChannel.CHANNEL_1, - sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, - encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW - } - - var audioRendererInfo = { - content: audio.ContentType.CONTENT_TYPE_SPEECH, - usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, - rendererFlags: 1 - } - - var audioRendererOptions = { - streamInfo: audioStreamInfo, - rendererInfo: audioRendererInfo - } - -let audioRenderer = await audio.createAudioRenderer(audioRendererOptions); - ``` - -2. Set the audio interruption mode. - - After the **AudioRenderer** instance is initialized, you can set the audio interruption mode.
- - ```js - var mode_ = audio.InterruptMode.SHARE_MODE; - await this.audioRenderer.setInterruptMode(mode_).then(() => { - console.log('[JSAR] [SetInterruptMode] Setting: '+ (mode_ == 0? " share mode":"independent mode") + "success"); - }); - ``` diff --git a/en/application-dev/media/audio-output-device-management.md b/en/application-dev/media/audio-output-device-management.md new file mode 100644 index 0000000000000000000000000000000000000000..ad20276c60ce7e535f99778e18d04e4e50e29dc6 --- /dev/null +++ b/en/application-dev/media/audio-output-device-management.md @@ -0,0 +1,90 @@ +# Audio Output Device Management + +If a device is connected to multiple audio output devices, you can use **AudioRoutingManager** to specify an audio output device to play audio. For details about the API reference, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9). + +## Creating an AudioRoutingManager Instance + +Before using **AudioRoutingManager** to manage audio devices, import the audio module and create an **AudioManager** instance. + +```ts +import audio from '@ohos.multimedia.audio'; // Import the audio module. + +let audioManager = audio.getAudioManager(); // Create an AudioManager instance. + +let audioRoutingManager = audioManager.getRoutingManager(); // Call an API of AudioManager to create an AudioRoutingManager instance. +``` + +## Supported Audio Output Device Types + +The table below lists the supported audio output devices. 
+ +| Name| Value| Description| +| -------- | -------- | -------- | +| EARPIECE | 1 | Earpiece.| +| SPEAKER | 2 | Speaker.| +| WIRED_HEADSET | 3 | Wired headset with a microphone.| +| WIRED_HEADPHONES | 4 | Wired headset without microphone.| +| BLUETOOTH_SCO | 7 | Bluetooth device using Synchronous Connection Oriented (SCO) links.| +| BLUETOOTH_A2DP | 8 | Bluetooth device using Advanced Audio Distribution Profile (A2DP) links.| +| USB_HEADSET | 22 | USB Type-C headset.| + +## Obtaining Output Device Information + +Use **getDevices()** to obtain information about all the output devices. + +```ts +audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => { + console.info('Promise returned to indicate that the device list is obtained.'); +}); +``` + +## Listening for Device Connection State Changes + +Set a listener to listen for changes of the device connection state. When a device is connected or disconnected, a callback is triggered. + +```ts +// Listen for connection state changes of audio devices. +audioRoutingManager.on('deviceChange', audio.DeviceFlag.OUTPUT_DEVICES_FLAG, (deviceChanged) => { + console.info('device change type: ' + deviceChanged.type); // Device connection state change. The value 0 means that the device is connected and 1 means that the device is disconnected. + console.info('device descriptor size : ' + deviceChanged.deviceDescriptors.length); + console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceRole); // Device role. + console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceType); // Device type. +}); + +// Cancel the listener for the connection state changes of audio devices. 
+audioRoutingManager.off('deviceChange', (deviceChanged) => { + console.info('Should be no callback.'); +}); +``` + +## Selecting an Audio Output Device (for System Applications only) + +Currently, only one output device can be selected, and the device ID is used as the unique identifier. For details about audio device descriptors, see [AudioDeviceDescriptors](../reference/apis/js-apis-audio.md#audiodevicedescriptors). + +> **NOTE** +> +> The user can connect to a group of audio devices (for example, a pair of Bluetooth headsets), but the system treats them as one device (a group of devices that share the same device ID). + +```ts +let outputAudioDeviceDescriptor = [{ + deviceRole : audio.DeviceRole.OUTPUT_DEVICE, + deviceType : audio.DeviceType.SPEAKER, + id : 1, + name : "", + address : "", + sampleRates : [44100], + channelCounts : [2], + channelMasks : [0], + networkId : audio.LOCAL_NETWORK_ID, + interruptGroupId : 1, + volumeGroupId : 1, +}]; + +async function selectOutputDevice(){ + audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor).then(() => { + console.info('Invoke selectOutputDevice succeeded.'); + }).catch((err) => { + console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`); + }); +} +``` diff --git a/en/application-dev/media/audio-overview.md b/en/application-dev/media/audio-overview.md deleted file mode 100755 index e1fd93eab8238b8ae55c9ce3dff2e807a1585a00..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-overview.md +++ /dev/null @@ -1,20 +0,0 @@ -# Audio Overview - -You can use APIs provided by the audio module to implement audio-related features, including audio playback and volume management. - -## Basic Concepts - -- **Sampling** - Sampling is a process to obtain discrete-time signals by extracting samples from analog signals in a continuous time domain at a specific interval. 
- -- **Sampling rate** - Sampling rate is the number of samples extracted from a continuous signal per second to form a discrete signal. It is measured in Hz. Generally, human hearing range is from 20 Hz to 20 kHz. Common audio sampling rates include 8 kHz, 11.025 kHz, 22.05 kHz, 16 kHz, 37.8 kHz, 44.1 kHz, 48 kHz, 96 kHz, and 192 kHz. - -- **Channel** - Channels refer to different spatial positions where independent audio signals are recorded or played. The number of channels is the number of audio sources used during audio recording, or the number of speakers used for audio playback. - -- **Audio frame** - Audio data is in stream form. For the convenience of audio algorithm processing and transmission, it is generally agreed that a data amount in a unit of 2.5 to 60 milliseconds is one audio frame. This unit is called sampling time, and its length is specific to codecs and the application requirements. - -- **PCM**
- Pulse code modulation (PCM) is a method used to digitally represent sampled analog signals. It converts continuous-time analog signals into discrete-time digital signal samples. diff --git a/en/application-dev/media/audio-playback-concurrency.md b/en/application-dev/media/audio-playback-concurrency.md new file mode 100644 index 0000000000000000000000000000000000000000..0b36594f6bef62c7ba7588bc8977af67609a6c9d --- /dev/null +++ b/en/application-dev/media/audio-playback-concurrency.md @@ -0,0 +1,119 @@ +# Audio Playback Concurrency Policy + +## Audio Interruption Policy + +If multiple audio streams are played at the same time, the user may feel uncomfortable or even painful. To address this issue, OpenHarmony presets the audio interruption policy so that only the audio stream holding audio focus can be played. + +When an application attempts to play an audio, the system requests audio focus for the audio stream. The audio stream that gains the focus can be played. If the request is rejected, the audio stream cannot be played. If the audio stream is interrupted by another, it loses the focus and therefore the playback is paused. All these actions are automatically performed by the system and do not require additional operations on the application. However, to maintain state consistency between the application and the system and ensure good user experience, it is recommended that the application [listen for the audio interruption event](#listening-for-the-audio-interruption-event) and perform the corresponding processing when receiving such an event (specified by [InterruptEvent](../reference/apis/js-apis-audio.md#interruptevent9)). + +OpenHarmony presets two [audio interruption modes](#audio-interruption-mode) to specify whether audio concurrency is controlled by the application or system. You can choose a mode for each of the audio streams created by the same application. 
+ +The audio interruption policy determines the operations (for example, pause, resume, duck, or unduck) to be performed on the audio stream. These operations can be performed by the system or application. To distinguish the body that executes the operations, the [audio interruption type](#audio-interruption-type) is introduced, and two audio interruption types are preset. + +### Audio Interruption Mode + +Two audio interruption modes, specified by [InterruptMode](../reference/apis/js-apis-audio.md#interruptmode9), are preset in the audio interruption policy: + +- **SHARED_MODE**: Multiple audio streams created by an application share one audio focus. The concurrency rules between these audio streams are determined by the application, without the use of the audio interruption policy. However, if another application needs to play audio while one of these audio streams is being played, the audio interruption policy is triggered. + +- **INDEPENDENT_MODE**: Each audio stream created by an application has an independent audio focus. When multiple audio streams are played concurrently, the audio interruption policy is triggered. + +The application can select an audio interruption mode as required. By default, the **SHARED_MODE** is used. + +You can set the audio interruption mode in either of the following ways: + +- If you [use the AVPlayer to develop audio playback](using-avplayer-for-playback.md), set the [audioInterruptMode](../reference/apis/js-apis-media.md#avplayer9) attribute of the AVPlayer to set the audio interruption mode. + +- If you [use the AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md), call [setInterruptMode](../reference/apis/js-apis-audio.md#setinterruptmode9) of the AudioRenderer to set the audio interruption mode. + + +### Audio Interruption Type + +The audio interruption policy (containing two audio interruption modes) determines the operation to be performed on each audio stream. 
These operations can be carried out by the system or application. To distinguish the executors, the audio interruption type, specified by [InterruptForceType](../reference/apis/js-apis-audio.md#interruptforcetype9), is introduced. + +- **INTERRUPT_FORCE**: The operation is performed by the system. The system forcibly interrupts audio playback. + +- **INTERRUPT_SHARE**: The operation is performed by the application. The application can take action or ignore as required. + +For the pause operation, the **INTERRUPT_FORCE** type is always used and cannot be changed by the application. However, the application can choose to use **INTERRUPT_SHARE** for other operations, such as the resume operation. The application can obtain the audio interruption type based on the value of the member variable **forceType** in the audio interruption event. + +During audio playback, the system automatically requests, holds, and releases the focus for the audio stream. When audio interruption occurs, the system forcibly pauses or stops playing or ducks the volume down for the audio stream, and sends an audio interruption event callback to the application. To maintain state consistency between the application and the system and ensure good user experience, it is recommended that the application [listen for the audio interruption event](#listening-for-the-audio-interruption-event) and perform processing when receiving such an event. + +For operations that cannot be forcibly performed by the system (for example, resume), the system sends the audio interruption event containing **INTERRUPT_SHARE**, and the application can choose to take action or ignore. + +## Listening for the Audio Interruption Event + +Your application is advised to listen for the audio interruption event when playing audio. When audio interruption occurs, the system performs processing on the audio stream according to the preset policy, and sends the audio interruption event to the application. 
+ +Upon the receipt of the event, the application carries out processing based on the event content to ensure that the application state is consistent with the expected effect. + +You can use either of the following methods to listen for the audio interruption event: + +- If you [use the AVPlayer to develop audio playback](using-avplayer-for-playback.md), call [on('audioInterrupt')](../reference/apis/js-apis-media.md#onaudiointerrupt9) of the AVPlayer to listen for the event. + +- If you [use the AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md), call [on('audioInterrupt')](../reference/apis/js-apis-audio.md#onaudiointerrupt9) of the AudioRenderer to listen for the event. + + To deliver an optimal user experience, the application needs to perform processing based on the event content. The following uses the AudioRenderer as an example to describe the recommended application processing. (The recommended processing is similar if the AVPlayer is used to develop audio playback.) You can customize the code to implement your own audio playback functionality or application processing based on service requirements. + +```ts +let isPlay; // An identifier specifying whether the audio stream is being played. In actual development, this parameter corresponds to the module related to the audio playback state. +let isDucked; // An identifier specifying whether to duck the volume down. In actual development, this parameter corresponds to the module related to the audio volume. +let started; // An identifier specifying whether the start operation is successful. + +async function onAudioInterrupt(){ + // The AudioRenderer is used as an example to describe how to develop audio playback. The audioRenderer variable is the AudioRenderer instance created for playback. 
+ audioRenderer.on('audioInterrupt', async(interruptEvent) => { + // When an audio interruption event occurs, the audioRenderer receives the interruptEvent callback and performs processing based on the content in the callback. + // The audioRenderer reads the value of interruptEvent.forceType to see whether the system has forcibly performed the operation. + // The audioRenderer then reads the value of interruptEvent.hintType and performs corresponding processing. + if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_FORCE) { + // If the value of interruptEvent.forceType is INTERRUPT_FORCE, the system has performed audio-related processing, and the application needs to update its state and make adjustments accordingly. + switch (interruptEvent.hintType) { + case audio.InterruptHint.INTERRUPT_HINT_PAUSE: + // The system has paused the audio stream (the focus is temporarily lost). To ensure state consistency, the application needs to switch to the audio paused state. + // Temporarily losing the focus: After the other audio stream releases the focus, the current audio stream will receive the audio interruption event corresponding to resume and automatically resume the playback. + isPlay = false; // A simplified processing indicating several operations for switching the application to the audio paused state. + break; + case audio.InterruptHint.INTERRUPT_HINT_STOP: + // The system has stopped the audio stream (the focus is permanently lost). To ensure state consistency, the application needs to switch to the audio paused state. + // Permanently losing the focus: No audio interruption event will be received. The user must manually trigger the operation to resume playback. + isPlay = false; // A simplified processing indicating several operations for switching the application to the audio paused state. + break; + case audio.InterruptHint.INTERRUPT_HINT_DUCK: + // The system has ducked the volume down (20% of the normal volume by default). 
To ensure state consistency, the application needs to switch to the volume decreased state. + // If the application does not want to play at a lower volume, it can select another processing mode, for example, proactively pausing the playback. + isDucked = true; // A simplified processing indicating several operations for switching the application to the volume decreased state. + break; + case audio.InterruptHint.INTERRUPT_HINT_UNDUCK: + // The system has restored the audio volume to normal. To ensure state consistency, the application needs to switch to the normal volume state. + isDucked = false; // A simplified processing indicating several operations for switching the application to the normal volume state. + break; + default: + break; + } + } else if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_SHARE) { + // If the value of interruptEvent.forceType is INTERRUPT_SHARE, the application can take action or ignore as required. + switch (interruptEvent.hintType) { + case audio.InterruptHint.INTERRUPT_HINT_RESUME: + // The paused audio stream can be played. It is recommended that the application continue to play the audio stream and switch to the audio playing state. + // If the application does not want to continue the playback, it can ignore the event. + // To continue the playback, the application needs to call start(), and use the identifier variable started to record the execution result of start(). + await audioRenderer.start().then(async function () { + started = true; // Calling start() is successful. + }).catch((err) => { + started = false; // Calling start() fails. + }); + // If calling start() is successful, the application needs to switch to the audio playing state. + if (started) { + isPlay = true; // A simplified processing indicating several operations for switching the application to the audio playing state. + } else { + // Resuming the audio playback fails. 
+ } + break; + default: + break; + } + } + }); +} +``` diff --git a/en/application-dev/media/audio-playback-overview.md b/en/application-dev/media/audio-playback-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..d17970d6de9b8b238db74d971ad5f58c605462eb --- /dev/null +++ b/en/application-dev/media/audio-playback-overview.md @@ -0,0 +1,25 @@ +# Audio Playback Development + +## Selecting an Audio Playback Development Mode + +OpenHarmony provides multiple classes for you to develop audio playback applications. You can select them based on the audio data formats, audio sources, audio usage scenarios, and even the programming language you use. Selecting a suitable class helps you reduce development workload and your application deliver a better effect. + +- [AVPlayer](using-avplayer-for-playback.md): provides ArkTS and JS APIs to implement audio and video playback. It also supports parsing streaming media and local assets, decapsulating media assets, decoding audio, and outputting audio. It can play audio files in MP3 and M4A formats, but not in PCM format. + +- [AudioRenderer](using-audiorenderer-for-playback.md): provides ArkTS and JS API to implement audio output. It supports only the PCM format and requires applications to continuously write audio data. The applications can perform data preprocessing, for example, setting the sampling rate and bit width of audio files, before audio input. This class can be used to develop more professional and diverse playback applications. To use this class, you must have basic audio processing knowledge. + +- [OpenSLES](using-opensl-es-for-playback.md): provides a set of standard, cross-platform, yet unique native audio APIs. It supports audio output in PCM format and is applicable to playback applications that are ported from other embedded platforms or that implements audio output at the native layer. 
+ +- [TonePlayer](using-toneplayer-for-playback.md): provides ArkTS and JS API to implement the playback of dialing tones and ringback tones. It can be used to play the content selected from a fixed type range, without requiring the input of media assets or audio data. This class is applicable to specific scenarios where dialing tones and ringback tones are played, and is available only to system applications. + +- Applications often need to use short sound effects, such as camera shutter sound effect, key press sound effect, and game shooting sound effect. Currently, only the **AVPlayer** class can implement audio file playback. More APIs will be provided to support this scenario in later versions. + +## Precautions for Developing Audio Playback Applications + +To enable your application to play a video in the background or when the screen is off, the application must meet the following conditions: + +1. The application is registered with the system for unified management through the **AVSession** APIs. Otherwise, the playback will be forcibly stopped when the application switches to the background. For details, see [AVSession Development](avsession-overview.md). + +2. The application must request a continuous task to prevent it from being suspended. For details, see [Continuous Task Development](../task-management/continuous-task-dev-guide.md). + +If the playback is interrupted when the application switches to the background, you can view the log to see whether the application has requested a continuous task. If the application has requested a continuous task, there is no log recording **pause id**; otherwise, there is a log recording **pause id**. 
diff --git a/en/application-dev/media/audio-playback-stream-management.md b/en/application-dev/media/audio-playback-stream-management.md new file mode 100644 index 0000000000000000000000000000000000000000..c6cf398b8403b3f799a1db20716021c91ca6e078 --- /dev/null +++ b/en/application-dev/media/audio-playback-stream-management.md @@ -0,0 +1,120 @@ +# Audio Playback Stream Management + +An audio playback application must notice audio stream state changes and perform corresponding operations. For example, when detecting that an audio stream is being played or paused, the application must change the UI display of the **Play** button. + +## Reading or Listening for Audio Stream State Changes in the Application + +Create an AudioRenderer by referring to [Using AudioRenderer for Audio Playback](using-audiorenderer-for-playback.md) or [audio.createAudioRenderer](../reference/apis/js-apis-audio.md#audiocreateaudiorenderer8). Then obtain the audio stream state changes in either of the following ways: + +- Check the [state](../reference/apis/js-apis-audio.md#attributes) of the AudioRenderer. + + ```ts + let audioRendererState = audioRenderer.state; + console.info(`Current state is: ${audioRendererState }`) + ``` + +- Register **stateChange** to listen for state changes of the AudioRenderer. + + ```ts + audioRenderer.on('stateChange', (rendererState) => { + console.info(`State change to: ${rendererState}`) + }); + ``` + +The application then performs an operation, for example, changing the display of the **Play** button, by comparing the obtained state with [AudioState](../reference/apis/js-apis-audio.md#audiostate8). + +## Reading or Listening for Changes in All Audio Streams + +If an application needs to obtain the change information about all audio streams, it can use **AudioStreamManager** to read or listen for the changes of all audio streams. + +> **NOTE** +> +> The audio stream change information marked as the system API can be viewed only by system applications. 
+ +The figure below shows the call relationship of audio stream management. + +![Call relationship of audio stream management](figures/audio-stream-mgmt-invoking-relationship.png) + +During application development, first use **getStreamManager()** to create an **AudioStreamManager** instance. Then call **on('audioRendererChange')** to listen for audio stream changes and obtain a notification when the audio stream state or device changes. To cancel the listening for these changes, call **off('audioRendererChange')**. You can also call **getCurrentAudioRendererInfoArray()** to obtain information such as the unique ID of the playback stream, UID of the playback stream client, and stream status. + +For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9). + +## How to Develop + +1. Create an **AudioStreamManager** instance. + + Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance. + + ```ts + import audio from '@ohos.multimedia.audio'; + let audioManager = audio.getAudioManager(); + let audioStreamManager = audioManager.getStreamManager(); + ``` + +2. Use **on('audioRendererChange')** to listen for audio playback stream changes. If the application needs to receive a notification when the audio playback stream state or device changes, it can subscribe to this event. 
+ + ```ts + audioStreamManager.on('audioRendererChange', (AudioRendererChangeInfoArray) => { + for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) { + let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i]; + console.info(`## RendererChange on is called for ${i} ##`); + console.info(`StreamId for ${i} is: ${AudioRendererChangeInfo.streamId}`); + console.info(`Content ${i} is: ${AudioRendererChangeInfo.rendererInfo.content}`); + console.info(`Stream ${i} is: ${AudioRendererChangeInfo.rendererInfo.usage}`); + console.info(`Flag ${i} is: ${AudioRendererChangeInfo.rendererInfo.rendererFlags}`); + for (let j = 0;j < AudioRendererChangeInfo.deviceDescriptors.length; j++) { + console.info(`Id: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].id}`); + console.info(`Type: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceType}`); + console.info(`Role: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceRole}`); + console.info(`Name: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].name}`); + console.info(`Address: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].address}`); + console.info(`SampleRates: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]}`); + console.info(`ChannelCount ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]}`); + console.info(`ChannelMask: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelMasks}`); + } + } + }); + ``` + +3. (Optional) Use **off('audioRendererChange')** to cancel listening for audio playback stream changes. + + ```ts + audioStreamManager.off('audioRendererChange'); + console.info('RendererChange Off is called '); + ``` + +4. (Optional) Call **getCurrentAudioRendererInfoArray()** to obtain the information about all audio playback streams. + + This API can be used to obtain the unique ID of the audio playback stream, UID of the audio playback client, audio status, and other information about the audio player. 
+ > **NOTE** + > + > Before listening for state changes of all audio streams, the application must request the **ohos.permission.USE_BLUETOOTH** [permission](../security/accesstoken-guidelines.md), for the device name and device address (Bluetooth related attributes) to be displayed correctly. + + ```ts + async function getCurrentAudioRendererInfoArray(){ + await audioStreamManager.getCurrentAudioRendererInfoArray().then( function (AudioRendererChangeInfoArray) { + console.info(`getCurrentAudioRendererInfoArray Get Promise is called `); + if (AudioRendererChangeInfoArray != null) { + for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) { + let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i]; + console.info(`StreamId for ${i} is: ${AudioRendererChangeInfo.streamId}`); + console.info(`Content ${i} is: ${AudioRendererChangeInfo.rendererInfo.content}`); + console.info(`Stream ${i} is: ${AudioRendererChangeInfo.rendererInfo.usage}`); + console.info(`Flag ${i} is: ${AudioRendererChangeInfo.rendererInfo.rendererFlags}`); + for (let j = 0;j < AudioRendererChangeInfo.deviceDescriptors.length; j++) { + console.info(`Id: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].id}`); + console.info(`Type: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceType}`); + console.info(`Role: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceRole}`); + console.info(`Name: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].name}`); + console.info(`Address: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].address}`); + console.info(`SampleRates: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]}`); + console.info(`ChannelCount ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]}`); + console.info(`ChannelMask: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelMasks}`); + } + } + } + }).catch((err) => { + console.error(`Invoke getCurrentAudioRendererInfoArray failed, code is ${err.code}, message 
is ${err.message}`); + }); + } + ``` diff --git a/en/application-dev/media/audio-playback.md b/en/application-dev/media/audio-playback.md deleted file mode 100644 index 1c7953d32b8ecee4c0ff34e82ab8d13947ac9271..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-playback.md +++ /dev/null @@ -1,243 +0,0 @@ -# Audio Playback Development - -## Introduction - -You can use audio playback APIs to convert audio data into audible analog signals and play the signals using output devices. You can also manage playback tasks. For example, you can control the playback and volume, obtain track information, and release resources. - -## Working Principles - -The following figures show the audio playback state transition and the interaction with external modules for audio playback. - -**Figure 1** Audio playback state transition - -![en-us_image_audio_state_machine](figures/en-us_image_audio_state_machine.png) - -**NOTE**: If the status is **Idle**, setting the **src** attribute does not change the status. In addition, after the **src** attribute is set successfully, you must call **reset()** before setting it to another value. - - - -**Figure 2** Interaction with external modules for audio playback - -![en-us_image_audio_player](figures/en-us_image_audio_player.png) - -**NOTE**: When a third-party application calls the JS interface provided by the JS interface layer to implement a feature, the framework layer invokes the audio component through the media service of the native framework and outputs the audio data decoded by the software to the audio HDI of the hardware interface layer to implement audio playback. - -## How to Develop - -For details about the APIs, see [AudioPlayer in the Media API](../reference/apis/js-apis-media.md#audioplayer). - -> **NOTE** -> -> The method for obtaining the path in the FA model is different from that in the stage model. 
For details about how to obtain the path, see [Application Sandbox Path Guidelines](../reference/apis/js-apis-fileio.md#guidelines). - -### Full-Process Scenario - -The full audio playback process includes creating an instance, setting the URI, playing audio, seeking to the playback position, setting the volume, pausing playback, obtaining track information, stopping playback, resetting the player, and releasing resources. - -For details about the **src** types supported by **AudioPlayer**, see the [src attribute](../reference/apis/js-apis-media.md#audioplayer_attributes). - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -// Print the stream track information. -function printfDescription(obj) { - for (let item in obj) { - let property = obj[item]; - console.info('audio key is ' + item); - console.info('audio value is ' + property); - } -} - -// Set the player callbacks. -function setCallBack(audioPlayer) { - audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully. - console.info('audio set source success'); - audioPlayer.play(); // The play() API can be invoked only after the 'dataLoad' event callback is complete. The 'play' event callback is then triggered. - }); - audioPlayer.on('play', () => { // Set the 'play' event callback. - console.info('audio play success'); - audioPlayer.pause(); // Trigger the 'pause' event callback and pause the playback. - }); - audioPlayer.on('pause', () => { // Set the 'pause' event callback. - console.info('audio pause success'); - audioPlayer.seek(5000); // Trigger the 'timeUpdate' event callback, and seek to 5000 ms for playback. - }); - audioPlayer.on('stop', () => { // Set the 'stop' event callback. - console.info('audio stop success'); - audioPlayer.reset(); // Trigger the 'reset' event callback, and reconfigure the src attribute to switch to the next song. 
- }); - audioPlayer.on('reset', () => { // Set the 'reset' event callback. - console.info('audio reset success'); - audioPlayer.release(); // Release the AudioPlayer instance. - audioPlayer = undefined; - }); - audioPlayer.on('timeUpdate', (seekDoneTime) => { // Set the 'timeUpdate' event callback. - if (typeof(seekDoneTime) == 'undefined') { - console.info('audio seek fail'); - return; - } - console.info('audio seek success, and seek time is ' + seekDoneTime); - audioPlayer.setVolume(0.5); // Trigger the 'volumeChange' event callback. - }); - audioPlayer.on('volumeChange', () => { // Set the 'volumeChange' event callback. - console.info('audio volumeChange success'); - audioPlayer.getTrackDescription((error, arrlist) => { // Obtain the audio track information in callback mode. - if (typeof (arrlist) != 'undefined') { - for (let i = 0; i < arrlist.length; i++) { - printfDescription(arrlist[i]); - } - } else { - console.log(`audio getTrackDescription fail, error:${error.message}`); - } - audioPlayer.stop(); // Trigger the 'stop' event callback to stop the playback. - }); - }); - audioPlayer.on('finish', () => { // Set the 'finish' event callback, which is triggered when the playback is complete. - console.info('audio play finish'); - }); - audioPlayer.on('error', (error) => { // Set the 'error' event callback. - console.info(`audio error called, errName is ${error.name}`); - console.info(`audio error called, errCode is ${error.code}`); - console.info(`audio error called, errMessage is ${error.message}`); - }); -} - -async function audioPlayerDemo() { - // 1. Create an AudioPlayer instance. - let audioPlayer = media.createAudioPlayer(); - setCallBack(audioPlayer); // Set the event callbacks. - // 2. Set the URI of the audio file. - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. 
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command. - let path = pathDir + '/01.mp3' - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback. -} -``` - -### Normal Playback Scenario - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -export class AudioDemo { - // Set the player callbacks. - setCallBack(audioPlayer) { - audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully. - console.info('audio set source success'); - audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback. - }); - audioPlayer.on('play', () => { // Set the 'play' event callback. - console.info('audio play success'); - }); - audioPlayer.on('finish', () => { // Set the 'finish' event callback, which is triggered when the playback is complete. - console.info('audio play finish'); - audioPlayer.release(); // Release the AudioPlayer instance. - audioPlayer = undefined; - }); - } - - async audioPlayerDemo() { - let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance. - this.setCallBack(audioPlayer); // Set the event callbacks. - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command. - let path = pathDir + '/01.mp3' - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback. 
- } -} -``` - -### Switching to the Next Song - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -export class AudioDemo { -// Set the player callbacks. - private isNextMusic = false; - setCallBack(audioPlayer) { - audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully. - console.info('audio set source success'); - audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback. - }); - audioPlayer.on('play', () => { // Set the 'play' event callback. - console.info('audio play success'); - audioPlayer.reset(); // Call the reset() API and trigger the 'reset' event callback. - }); - audioPlayer.on('reset', () => { // Set the 'reset' event callback. - console.info('audio play success'); - if (!this.isNextMusic) { // When isNextMusic is false, changing songs is implemented. - this.nextMusic(audioPlayer); // Changing songs is implemented. - } else { - audioPlayer.release(); // Release the AudioPlayer instance. - audioPlayer = undefined; - } - }); - } - - async nextMusic(audioPlayer) { - this.isNextMusic = true; - let nextFdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\02.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command. - let nextpath = pathDir + '/02.mp3' - let nextFile = await fs.open(nextpath); - nextFdPath = nextFdPath + '' + nextFile.fd; - audioPlayer.src = nextFdPath; // Set the src attribute and trigger the 'dataLoad' event callback. - } - - async audioPlayerDemo() { - let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance. - this.setCallBack(audioPlayer); // Set the event callbacks. 
- let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command. - let path = pathDir + '/01.mp3' - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback. - } -} -``` - -### Looping a Song - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -export class AudioDemo { - // Set the player callbacks. - setCallBack(audioPlayer) { - audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully. - console.info('audio set source success'); - audioPlayer.loop = true; // Set the loop playback attribute. - audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback. - }); - audioPlayer.on('play', () => { // Set the 'play' event callback to start loop playback. - console.info('audio play success'); - }); - } - - async audioPlayerDemo() { - let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance. - this.setCallBack(audioPlayer); // Set the event callbacks. - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command. - let path = pathDir + '/01.mp3' - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback. 
- } -} -``` diff --git a/en/application-dev/media/audio-recorder.md b/en/application-dev/media/audio-recorder.md deleted file mode 100644 index 78650a61d0a803811394e623ab0bc46155438ba9..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-recorder.md +++ /dev/null @@ -1,197 +0,0 @@ -# Audio Recording Development - -## Introduction - -During audio recording, audio signals are captured, encoded, and saved to files. You can specify parameters such as the sampling rate, number of audio channels, encoding format, encapsulation format, and output file path for audio recording. - -## Working Principles - -The following figures show the audio recording state transition and the interaction with external modules for audio recording. - -**Figure 1** Audio recording state transition - -![en-us_image_audio_recorder_state_machine](figures/en-us_image_audio_recorder_state_machine.png) - - - -**Figure 2** Interaction with external modules for audio recording - -![en-us_image_audio_recorder_zero](figures/en-us_image_audio_recorder_zero.png) - -**NOTE**: When a third-party recording application or recorder calls the JS interface provided by the JS interface layer to implement a feature, the framework layer invokes the audio component through the media service of the native framework to obtain the audio data captured through the audio HDI. The framework layer then encodes the audio data through software and saves the encoded and encapsulated audio data to a file to implement audio recording. - -## Constraints - -Before developing audio recording, configure the **ohos.permission.MICROPHONE** permission for your application. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md). - -## How to Develop - -For details about the APIs, see [AudioRecorder in the Media API](../reference/apis/js-apis-media.md#audiorecorder). 
- -### Full-Process Scenario - -The full audio recording process includes creating an instance, setting recording parameters, starting, pausing, resuming, and stopping recording, and releasing resources. - -```js -import media from '@ohos.multimedia.media' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' -export class AudioRecorderDemo { - private testFdNumber; // Used to save the FD address. - - // Set the callbacks related to audio recording. - setCallBack(audioRecorder) { - audioRecorder.on('prepare', () => { // Set the prepare event callback. - console.log('prepare success'); - audioRecorder.start(); // Call the start API to start recording and trigger the start event callback. - }); - audioRecorder.on('start', () => { // Set the start event callback. - console.log('audio recorder start success'); - audioRecorder.pause(); // Call the pause API to pause recording and trigger the pause event callback. - }); - audioRecorder.on('pause', () => { // Set the pause event callback. - console.log('audio recorder pause success'); - audioRecorder.resume(); // Call the resume API to resume recording and trigger the resume event callback. - }); - audioRecorder.on('resume', () => { // Set the resume event callback. - console.log('audio recorder resume success'); - audioRecorder.stop(); // Call the stop API to stop recording and trigger the stop event callback. - }); - audioRecorder.on('stop', () => { // Set the stop event callback. - console.log('audio recorder stop success'); - audioRecorder.reset(); // Call the reset API to reset the recorder and trigger the reset event callback. - }); - audioRecorder.on('reset', () => { // Set the reset event callback. - console.log('audio recorder reset success'); - audioRecorder.release(); // Call the release API to release resources and trigger the release event callback. - }); - audioRecorder.on('release', () => { // Set the release event callback. 
- console.log('audio recorder release success'); - audioRecorder = undefined; - }); - audioRecorder.on('error', (error) => { // Set the error event callback. - console.info(`audio error called, errName is ${error.name}`); - console.info(`audio error called, errCode is ${error.code}`); - console.info(`audio error called, errMessage is ${error.message}`); - }); - } - - // pathName indicates the passed recording file name, for example, 01.mp3. The generated file address is /storage/media/100/local/files/Video/01.mp3. - // To use the media library, declare the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(pathName) { - let displayName = pathName; - const mediaTest = mediaLibrary.getMediaLibrary(); - let fileKeyObj = mediaLibrary.FileKey; - let mediaType = mediaLibrary.MediaType.VIDEO; - let publicPath = await mediaTest.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO); - let dataUri = await mediaTest.createAsset(mediaType, displayName, publicPath); - if (dataUri != undefined) { - let args = dataUri.id.toString(); - let fetchOp = { - selections : fileKeyObj.ID + "=?", - selectionArgs : [args], - } - let fetchFileResult = await mediaTest.getFileAssets(fetchOp); - let fileAsset = await fetchFileResult.getAllObject(); - let fdNumber = await fileAsset[0].open('Rw'); - this.testFdNumber = "fd://" + fdNumber.toString(); - } - } - - async audioRecorderDemo() { - // 1. Create an AudioRecorder instance. - let audioRecorder = media.createAudioRecorder(); - // 2. Set the callbacks. - this.setCallBack(audioRecorder); - await this.getFd('01.mp3'); // Call the getFd method to obtain the FD address of the file to be recorded. - // 3. Set the recording parameters. - let audioRecorderConfig = { - audioEncodeBitRate : 22050, - audioSampleRate : 22050, - numberOfChannels : 2, - uri : this.testFdNumber, // testFdNumber is generated by getFd. 
- location : { latitude : 30, longitude : 130}, - audioEncoderMime : media.CodecMimeType.AUDIO_AAC, - fileFormat : media.ContainerFormatType.CFT_MPEG_4A, - } - audioRecorder.prepare(audioRecorderConfig); // Call the prepare method to trigger the prepare event callback. - } -} -``` - -### Normal Recording Scenario - -Unlike the full-process scenario, the normal recording scenario does not include the process of pausing and resuming recording. - -```js -import media from '@ohos.multimedia.media' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' -export class AudioRecorderDemo { - private testFdNumber; // Used to save the FD address. - - // Set the callbacks related to audio recording. - setCallBack(audioRecorder) { - audioRecorder.on('prepare', () => { // Set the prepare event callback. - console.log('prepare success'); - audioRecorder.start(); // Call the start API to start recording and trigger the start event callback. - }); - audioRecorder.on('start', () => { // Set the start event callback. - console.log('audio recorder start success'); - audioRecorder.stop(); // Call the stop API to stop recording and trigger the stop event callback. - }); - audioRecorder.on('stop', () => { // Set the stop event callback. - console.log('audio recorder stop success'); - audioRecorder.release(); // Call the release API to release resources and trigger the release event callback. - }); - audioRecorder.on('release', () => { // Set the release event callback. - console.log('audio recorder release success'); - audioRecorder = undefined; - }); - audioRecorder.on('error', (error) => { // Set the error event callback. - console.info(`audio error called, errName is ${error.name}`); - console.info(`audio error called, errCode is ${error.code}`); - console.info(`audio error called, errMessage is ${error.message}`); - }); - } - - // pathName indicates the passed recording file name, for example, 01.mp3. The generated file address is /storage/media/100/local/files/Video/01.mp3. 
- // To use the media library, declare the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(pathName) { - let displayName = pathName; - const mediaTest = mediaLibrary.getMediaLibrary(); - let fileKeyObj = mediaLibrary.FileKey; - let mediaType = mediaLibrary.MediaType.VIDEO; - let publicPath = await mediaTest.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO); - let dataUri = await mediaTest.createAsset(mediaType, displayName, publicPath); - if (dataUri != undefined) { - let args = dataUri.id.toString(); - let fetchOp = { - selections : fileKeyObj.ID + "=?", - selectionArgs : [args], - } - let fetchFileResult = await mediaTest.getFileAssets(fetchOp); - let fileAsset = await fetchFileResult.getAllObject(); - let fdNumber = await fileAsset[0].open('Rw'); - this.testFdNumber = "fd://" + fdNumber.toString(); - } - } - - async audioRecorderDemo() { - // 1. Create an AudioRecorder instance. - let audioRecorder = media.createAudioRecorder(); - // 2. Set the callbacks. - this.setCallBack(audioRecorder); - await this.getFd('01.mp3'); // Call the getFd method to obtain the FD address of the file to be recorded. - // 3. Set the recording parameters. - let audioRecorderConfig = { - audioEncodeBitRate : 22050, - audioSampleRate : 22050, - numberOfChannels : 2, - uri : this.testFdNumber, // testFdNumber is generated by getFd. - location : { latitude : 30, longitude : 130}, - audioEncoderMime : media.CodecMimeType.AUDIO_AAC, - fileFormat : media.ContainerFormatType.CFT_MPEG_4A, - } - audioRecorder.prepare(audioRecorderConfig); // Call the prepare method to trigger the prepare event callback. 
- } -} -``` diff --git a/en/application-dev/media/audio-recording-overview.md b/en/application-dev/media/audio-recording-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..698255fddd78d98f9e635b16b3db94e6980bd4a0 --- /dev/null +++ b/en/application-dev/media/audio-recording-overview.md @@ -0,0 +1,17 @@ +# Audio Recording Development + +## Selecting an Audio Recording Development Mode + +OpenHarmony provides multiple classes for you to develop audio recording applications. You can select them based on the recording output formats, audio usage scenarios, and even the programming language you use. Selecting a suitable class helps you reduce development workload and your application deliver a better effect. + +- [AVRecorder](using-avrecorder-for-recording.md): provides ArkTS and JS APIs to implement audio and video recording. It also supports audio input, audio encoding, and media encapsulation. You can directly call device hardware, such as microphone, for recording and generate M4A audio files. + +- [AudioCapturer](using-audiocapturer-for-recording.md): provides ArkTS and JS API to implement audio input. It supports only the PCM format and requires applications to continuously read audio data. The application can perform data processing after audio output. This class can be used to develop more professional and diverse recording applications. To use this class, you must have basic audio processing knowledge. + +- [OpenSLES](using-opensl-es-for-recording.md): provides a set of standard, cross-platform, yet unique native audio APIs. It supports audio input in PCM format and is applicable to recording applications that are ported from other embedded platforms or that implements audio input at the native layer. + +## Precautions for Developing Audio Recording Applications + +The application must request the **ohos.permission.MICROPHONE** permission from the user before invoking the microphone to record audio. 
+ +For details about how to request the permission, see [Permission Application Guide](../security/accesstoken-guidelines.md). For details about how to use and manage microphones, see [Microphone Management](mic-management.md). diff --git a/en/application-dev/media/audio-recording-stream-management.md b/en/application-dev/media/audio-recording-stream-management.md new file mode 100644 index 0000000000000000000000000000000000000000..8161d1bd5bbe5fbc55560ab557570baaaa99976a --- /dev/null +++ b/en/application-dev/media/audio-recording-stream-management.md @@ -0,0 +1,118 @@ +# Audio Recording Stream Management + +An audio recording application must notice audio stream state changes and perform corresponding operations. For example, when detecting that the user stops recording, the application must notify the user that the recording finishes. + +## Reading or Listening for Audio Stream State Changes in the Application + +Create an AudioCapturer by referring to [Using AudioCapturer for Audio Recording](using-audiocapturer-for-recording.md) or [audio.createAudioCapturer](../reference/apis/js-apis-audio.md#audiocreateaudiocapturer8). Then obtain the audio stream state changes in either of the following ways: + +- Check the [state](../reference/apis/js-apis-audio.md#attributes) of the AudioCapturer. + + ```ts + let audioCapturerState = audioCapturer.state; + console.info(`Current state is: ${audioCapturerState }`) + ``` + +- Register **stateChange** to listen for state changes of the AudioCapturer. + + ```ts + audioCapturer.on('stateChange', (capturerState) => { + console.info(`State change to: ${capturerState}`) + }); + ``` + +The application then performs an operation, for example, displays a message indicating the end of the recording, by comparing the obtained state with [AudioState](../reference/apis/js-apis-audio.md#audiostate8). 
+ +## Reading or Listening for Changes in All Audio Streams + +If an application needs to obtain the change information about all audio streams, it can use **AudioStreamManager** to read or listen for the changes of all audio streams. + +> **NOTE** +> +> The audio stream change information marked as the system API can be viewed only by system applications. + +The figure below shows the call relationship of audio stream management. + +![Call relationship of recording stream management](figures/invoking-relationship-recording-stream-mgmt.png) + +During application development, first use **getStreamManager()** to create an **AudioStreamManager** instance. Then call **on('audioCapturerChange')** to listen for audio stream changes and obtain a notification when the audio stream state or device changes. To cancel the listening for these changes, call **off('audioCapturerChange')**. You can call **getCurrentAudioCapturerInfoArray()** to obtain information such as the unique ID of the recording stream, UID of the recording stream client, and stream status. + +For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9). + + +## How to Develop + +1. Create an **AudioStreamManager** instance. + + Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance. + + ```ts + import audio from '@ohos.multimedia.audio'; + let audioManager = audio.getAudioManager(); + let audioStreamManager = audioManager.getStreamManager(); + ``` + +2. Use **on('audioCapturerChange')** to listen for audio recording stream changes. If the application needs to receive a notification when the audio recording stream state or device changes, it can subscribe to this event. 
+ + ```ts + audioStreamManager.on('audioCapturerChange', (AudioCapturerChangeInfoArray) => { + for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) { + console.info(`## CapChange on is called for element ${i} ##`); + console.info(`StreamId for ${i} is: ${AudioCapturerChangeInfoArray[i].streamId}`); + console.info(`Source for ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.source}`); + console.info(`Flag ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags}`); + let devDescriptor = AudioCapturerChangeInfoArray[i].deviceDescriptors; + for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) { + console.info(`Id: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id}`); + console.info(`Type: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType}`); + console.info(`Role: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole}`); + console.info(`Name: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name}`); + console.info(`Address: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address}`); + console.info(`SampleRates: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]}`); + console.info(`ChannelCounts ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]}`); + console.info(`ChannelMask: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks}`); + } + } + }); + ``` + +3. (Optional) Use **off('audioCapturerChange')** to cancel listening for audio recording stream changes. + + ```ts + audioStreamManager.off('audioCapturerChange'); + console.info('CapturerChange Off is called'); + ``` + +4. (Optional) Call **getCurrentAudioCapturerInfoArray()** to obtain information about the current audio recording stream. 
+ + This API can be used to obtain the unique ID of the audio recording stream, UID of the audio recording client, audio status, and other information about the AudioCapturer. + > **NOTE** + > + > Before listening for state changes of all audio streams, the application must request the **ohos.permission.USE_BLUETOOTH** [permission](../security/accesstoken-guidelines.md), for the device name and device address (Bluetooth related attributes) to be displayed correctly. + + ```ts + async function getCurrentAudioCapturerInfoArray(){ + await audioStreamManager.getCurrentAudioCapturerInfoArray().then( function (AudioCapturerChangeInfoArray) { + console.info('getCurrentAudioCapturerInfoArray Get Promise Called '); + if (AudioCapturerChangeInfoArray != null) { + for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) { + console.info(`StreamId for ${i} is: ${AudioCapturerChangeInfoArray[i].streamId}`); + console.info(`Source for ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.source}`); + console.info(`Flag ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags}`); + for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) { + console.info(`Id: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id}`); + console.info(`Type: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType}`); + console.info(`Role: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole}`); + console.info(`Name: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name}`); + console.info(`Address: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address}`); + console.info(`SampleRates: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]}`); + console.info(`ChannelCounts ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]}`); + console.info(`ChannelMask: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks}`); 
+ } + } + } + }).catch((err) => { + console.error(`Invoke getCurrentAudioCapturerInfoArray failed, code is ${err.code}, message is ${err.message}`); + }); + } + ``` diff --git a/en/application-dev/media/audio-renderer.md b/en/application-dev/media/audio-renderer.md deleted file mode 100644 index 0a58ea5251744162d9948c23e75351b298a95bb8..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-renderer.md +++ /dev/null @@ -1,522 +0,0 @@ -# Audio Rendering Development - -## Introduction - -**AudioRenderer** provides APIs for rendering audio files and controlling playback. It also supports audio interruption. You can use the APIs provided by **AudioRenderer** to play audio files in output devices and manage playback tasks. -Before calling the APIs, be familiar with the following terms: - -- **Audio interruption**: When an audio stream with a higher priority needs to be played, the audio renderer interrupts the stream with a lower priority. For example, if a call comes in when the user is listening to music, the music playback, which is the lower priority stream, is paused. -- **Status check**: During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the **AudioRenderer** instance. This is because some operations can be performed only when the audio renderer is in a given state. If the application performs an operation when the audio renderer is not in the given state, the system may throw an exception or generate other undefined behavior. -- **Asynchronous operation**: To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the promise functions. For more information, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8). -- **Audio interruption mode**: OpenHarmony provides two audio interruption modes: **shared mode** and **independent mode**. 
In shared mode, all **AudioRenderer** instances created by the same application share one focus object, and there is no focus transfer inside the application. Therefore, no callback will be triggered. In independent mode, each **AudioRenderer** instance has an independent focus object, and focus transfer is triggered by focus preemption. When focus transfer occurs, the **AudioRenderer** instance that is having the focus receives a notification through the callback. By default, the shared mode is used. You can call **setInterruptMode()** to switch to the independent mode. - -## Working Principles - -The following figure shows the audio renderer state transitions. - -**Figure 1** Audio renderer state transitions - -![audio-renderer-state](figures/audio-renderer-state.png) - -- **PREPARED**: The audio renderer enters this state by calling **create()**. -- **RUNNING**: The audio renderer enters this state by calling **start()** when it is in the **PREPARED** state or by calling **start()** when it is in the **STOPPED** state. -- **PAUSED**: The audio renderer enters this state by calling **pause()** when it is in the **RUNNING** state. When the audio playback is paused, it can call **start()** to resume the playback. -- **STOPPED**: The audio renderer enters this state by calling **stop()** when it is in the **PAUSED** or **RUNNING** state. -- **RELEASED**: The audio renderer enters this state by calling **release()** when it is in the **PREPARED**, **PAUSED**, or **STOPPED** state. In this state, the audio renderer releases all occupied hardware and software resources and will not transit to any other state. - -## How to Develop - -For details about the APIs, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8). - -1. Use **createAudioRenderer()** to create a global **AudioRenderer** instance. - Set parameters of the **AudioRenderer** instance in **audioRendererOptions**. 
This instance is used to render audio, control and obtain the rendering status, and register a callback for notification. - - ```js - import audio from '@ohos.multimedia.audio'; - import fs from '@ohos.file.fs'; - - // Perform a self-test on APIs related to audio rendering. - @Entry - @Component - struct AudioRenderer1129 { - private audioRenderer: audio.AudioRenderer; - private bufferSize; // It will be used for the call of the write function in step 3. - private audioRenderer1: audio.AudioRenderer; // It will be used for the call in the complete example in step 14. - private audioRenderer2: audio.AudioRenderer; // It will be used for the call in the complete example in step 14. - - async initAudioRender(){ - let audioStreamInfo = { - samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, - channels: audio.AudioChannel.CHANNEL_1, - sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, - encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW - } - let audioRendererInfo = { - content: audio.ContentType.CONTENT_TYPE_SPEECH, - usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, - rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0. - } - let audioRendererOptions = { - streamInfo: audioStreamInfo, - rendererInfo: audioRendererInfo - } - this.audioRenderer = await audio.createAudioRenderer(audioRendererOptions); - console.log("Create audio renderer success."); - } - } - ``` - -2. Use **start()** to start audio rendering. - - ```js - async startRenderer() { - let state = this.audioRenderer.state; - // The audio renderer should be in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state when start() is called. 
- if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED && - state != audio.AudioState.STATE_STOPPED) { - console.info('Renderer is not in a correct state to start'); - return; - } - - await this.audioRenderer.start(); - - state = this.audioRenderer.state; - if (state == audio.AudioState.STATE_RUNNING) { - console.info('Renderer started'); - } else { - console.error('Renderer start failed'); - } - } - ``` - - The renderer state will be **STATE_RUNNING** once the audio renderer is started. The application can then begin reading buffers. - -3. Call **write()** to write data to the buffer. - - Read the audio data to be played to the buffer. Call **write()** repeatedly to write the data to the buffer. Import fs from '@ohos.file.fs'; as step 1. - - ```js - async writeData(){ - // Set a proper buffer size for the audio renderer. You can also select a buffer of another size. - this.bufferSize = await this.audioRenderer.getBufferSize(); - let dir = globalThis.fileDir; // You must use the sandbox path. - const filePath = dir + '/file_example_WAV_2MG.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav - console.info(`file filePath: ${ filePath}`); - - let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); - let stat = await fs.stat(filePath); // Music file information. - let buf = new ArrayBuffer(this.bufferSize); - let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); - for (let i = 0;i < len; i++) { - let options = { - offset: i * this.bufferSize, - length: this.bufferSize - } - let readsize = await fs.read(file.fd, buf, options) - let writeSize = await new Promise((resolve,reject)=>{ - this.audioRenderer.write(buf,(err,writeSize)=>{ - if(err){ - reject(err) - }else{ - resolve(writeSize) - } - }) - }) - } - - fs.close(file) - await this.audioRenderer.stop(); // Stop rendering. 
- await this.audioRenderer.release(); // Release the resources. - } - ``` - -4. (Optional) Call **pause()** or **stop()** to pause or stop rendering. - - ```js - async pauseRenderer() { - let state = this.audioRenderer.state; - // The audio renderer can be paused only when it is in the STATE_RUNNING state. - if (state != audio.AudioState.STATE_RUNNING) { - console.info('Renderer is not running'); - return; - } - - await this.audioRenderer.pause(); - - state = this.audioRenderer.state; - if (state == audio.AudioState.STATE_PAUSED) { - console.info('Renderer paused'); - } else { - console.error('Renderer pause failed'); - } - } - - async stopRenderer() { - let state = this.audioRenderer.state; - // The audio renderer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state. - if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) { - console.info('Renderer is not running or paused'); - return; - } - - await this.audioRenderer.stop(); - - state = this.audioRenderer.state; - if (state == audio.AudioState.STATE_STOPPED) { - console.info('Renderer stopped'); - } else { - console.error('Renderer stop failed'); - } - } - ``` - -5. (Optional) Call **drain()** to clear the buffer. - - ```js - async drainRenderer() { - let state = this.audioRenderer.state; - // drain() can be used only when the audio renderer is in the STATE_RUNNING state. - if (state != audio.AudioState.STATE_RUNNING) { - console.info('Renderer is not running'); - return; - } - - await this.audioRenderer.drain(); - state = this.audioRenderer.state; - } - ``` - -6. After the task is complete, call **release()** to release related resources. - - **AudioRenderer** uses a large number of system resources. Therefore, ensure that the resources are released after the task is complete. - - ```js - async releaseRenderer() { - let state = this.audioRenderer.state; - // The audio renderer can be released only when it is not in the STATE_RELEASED or STATE_NEW state. 
- if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) { - console.info('Renderer already released'); - return; - } - await this.audioRenderer.release(); - - state = this.audioRenderer.state; - if (state == audio.AudioState.STATE_RELEASED) { - console.info('Renderer released'); - } else { - console.info('Renderer release failed'); - } - } - ``` - -7. (Optional) Obtain the audio renderer information. - - You can use the following code to obtain the audio renderer information: - - ```js - async getRenderInfo(){ - // Obtain the audio renderer state. - let state = this.audioRenderer.state; - // Obtain the audio renderer information. - let audioRendererInfo : audio.AudioRendererInfo = await this.audioRenderer.getRendererInfo(); - // Obtain the audio stream information. - let audioStreamInfo : audio.AudioStreamInfo = await this.audioRenderer.getStreamInfo(); - // Obtain the audio stream ID. - let audioStreamId : number = await this.audioRenderer.getAudioStreamId(); - // Obtain the Unix timestamp, in nanoseconds. - let audioTime : number = await this.audioRenderer.getAudioTime(); - // Obtain a proper minimum buffer size. - let bufferSize : number = await this.audioRenderer.getBufferSize(); - // Obtain the audio renderer rate. - let renderRate : audio.AudioRendererRate = await this.audioRenderer.getRenderRate(); - } - ``` - -8. (Optional) Set the audio renderer information. - - You can use the following code to set the audio renderer information: - - ```js - async setAudioRenderInfo(){ - // Set the audio renderer rate to RENDER_RATE_NORMAL. - let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL; - await this.audioRenderer.setRenderRate(renderRate); - // Set the interruption mode of the audio renderer to SHARE_MODE. - let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE; - await this.audioRenderer.setInterruptMode(interruptMode); - // Set the volume of the stream to 0.5. 
- let volume : number = 0.5; - await this.audioRenderer.setVolume(volume); - } - ``` - -9. (Optional) Use **on('audioInterrupt')** to subscribe to the audio interruption event, and use **off('audioInterrupt')** to unsubscribe from the event. - - Audio interruption means that Stream A will be interrupted when Stream B with a higher or equal priority requests to become active and use the output device. - - In some cases, the audio renderer performs forcible operations such as pausing and ducking, and notifies the application through **InterruptEvent**. In other cases, the application can choose to act on the **InterruptEvent** or ignore it. - - In the case of audio interruption, the application may encounter write failures. To avoid such failures, interruption-unaware applications can use **audioRenderer.state** to check the audio renderer state before writing audio data. The applications can obtain more details by subscribing to the audio interruption events. For details, see [InterruptEvent](../reference/apis/js-apis-audio.md#interruptevent9). - - It should be noted that the audio interruption event subscription of the **AudioRenderer** module is slightly different from **on('interrupt')** in [AudioManager](../reference/apis/js-apis-audio.md#audiomanager). The **on('interrupt')** and **off('interrupt')** APIs are deprecated since API version 9. In the **AudioRenderer** module, you only need to call **on('audioInterrupt')** to listen for focus change events. When the **AudioRenderer** instance created by the application performs actions such as start, stop, and pause, it requests the focus, which triggers focus transfer and in return enables the related **AudioRenderer** instance to receive a notification through the callback. For instances other than **AudioRenderer**, such as frequency modulation (FM) and voice wakeup, the application does not create an instance. 
In this case, the application can call **on('interrupt')** in **AudioManager** to receive a focus change notification. - - ```js - async subscribeAudioRender(){ - this.audioRenderer.on('audioInterrupt', (interruptEvent) => { - console.info('InterruptEvent Received'); - console.info(`InterruptType: ${interruptEvent.eventType}`); - console.info(`InterruptForceType: ${interruptEvent.forceType}`); - console.info(`AInterruptHint: ${interruptEvent.hintType}`); - - if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) { - switch (interruptEvent.hintType) { - // Forcible pausing initiated by the audio framework. To prevent data loss, stop the write operation. - case audio.InterruptHint.INTERRUPT_HINT_PAUSE: - console.info('isPlay is false'); - break; - // Forcible stopping initiated by the audio framework. To prevent data loss, stop the write operation. - case audio.InterruptHint.INTERRUPT_HINT_STOP: - console.info('isPlay is false'); - break; - // Forcible ducking initiated by the audio framework. - case audio.InterruptHint.INTERRUPT_HINT_DUCK: - break; - // Undocking initiated by the audio framework. - case audio.InterruptHint.INTERRUPT_HINT_UNDUCK: - break; - } - } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) { - switch (interruptEvent.hintType) { - // Notify the application that the rendering starts. - case audio.InterruptHint.INTERRUPT_HINT_RESUME: - this.startRenderer(); - break; - // Notify the application that the audio stream is interrupted. The application then determines whether to continue. (In this example, the application pauses the rendering.) - case audio.InterruptHint.INTERRUPT_HINT_PAUSE: - console.info('isPlay is false'); - this.pauseRenderer(); - break; - } - } - }); - } - ``` - -10. (Optional) Use **on('markReach')** to subscribe to the mark reached event, and use **off('markReach')** to unsubscribe from the event. 
- - After the mark reached event is subscribed to, when the number of frames rendered by the audio renderer reaches the specified value, a callback is triggered and the specified value is returned. - - ```js - async markReach(){ - this.audioRenderer.on('markReach', 50, (position) => { - if (position == 50) { - console.info('ON Triggered successfully'); - } - }); - this.audioRenderer.off('markReach'); // Unsubscribe from the mark reached event. This event will no longer be listened for. - } - ``` - -11. (Optional) Use **on('periodReach')** to subscribe to the period reached event, and use **off('periodReach')** to unsubscribe from the event. - - After the period reached event is subscribed to, each time the number of frames rendered by the audio renderer reaches the specified value, a callback is triggered and the specified value is returned. - - ```js - async periodReach(){ - this.audioRenderer.on('periodReach',10, (reachNumber) => { - console.info(`In this period, the renderer reached frame: ${reachNumber} `); - }); - - this.audioRenderer.off('periodReach'); // Unsubscribe from the period reached event. This event will no longer be listened for. - } - ``` - -12. (Optional) Use **on('stateChange')** to subscribe to audio renderer state changes. - - After the **stateChange** event is subscribed to, when the audio renderer state changes, a callback is triggered and the audio renderer state is returned. - - ```js - async stateChange(){ - this.audioRenderer.on('stateChange', (audioState) => { - console.info('State change event Received'); - console.info(`Current renderer state is: ${audioState}`); - }); - } - ``` - -13. (Optional) Handle exceptions of **on()**. - - If the string or the parameter type passed in **on()** is incorrect , the application throws an exception. In this case, you can use **try catch** to capture the exception. - - ```js - async errorCall(){ - try { - this.audioRenderer.on('invalidInput', () => { // The string is invalid. 
- }) - } catch (err) { - console.info(`Call on function error, ${err}`); // The application throws exception 401. - } - try { - this.audioRenderer.on(1, () => { // The type of the input parameter is incorrect. - }) - } catch (err) { - console.info(`Call on function error, ${err}`); // The application throws exception 6800101. - } - } - ``` - -14. (Optional) Refer to the complete example of **on('audioInterrupt')**. - Declare audioRenderer1 and audioRenderer2 first. For details, see step 1. - Create **AudioRender1** and **AudioRender2** in an application, configure the independent interruption mode, and call **on('audioInterrupt')** to subscribe to audio interruption events. At the beginning, **AudioRender1** has the focus. When **AudioRender2** attempts to obtain the focus, **AudioRender1** receives a focus transfer notification and the related log information is printed. If the shared mode is used, the log information will not be printed during application running. - ```js - async runningAudioRender1(){ - let audioStreamInfo = { - samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, - channels: audio.AudioChannel.CHANNEL_1, - sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE, - encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW - } - let audioRendererInfo = { - content: audio.ContentType.CONTENT_TYPE_MUSIC, - usage: audio.StreamUsage.STREAM_USAGE_MEDIA, - rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0. - } - let audioRendererOptions = { - streamInfo: audioStreamInfo, - rendererInfo: audioRendererInfo - } - - // 1.1 Create an instance. - this.audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions); - console.info("Create audio renderer 1 success."); - - // 1.2 Set the independent mode. 
- this.audioRenderer1.setInterruptMode(1).then( data => { - console.info('audioRenderer1 setInterruptMode Success!'); - }).catch((err) => { - console.error(`audioRenderer1 setInterruptMode Fail: ${err}`); - }); - - // 1.3 Set the listener. - this.audioRenderer1.on('audioInterrupt', async(interruptEvent) => { - console.info(`audioRenderer1 on audioInterrupt : ${JSON.stringify(interruptEvent)}`) - }); - - // 1.4 Start rendering. - await this.audioRenderer1.start(); - console.info('startAudioRender1 success'); - - // 1.5 Obtain the buffer size, which is the proper minimum buffer size of the audio renderer. You can also select a buffer of another size. - const bufferSize = await this.audioRenderer1.getBufferSize(); - console.info(`audio bufferSize: ${bufferSize}`); - - // 1.6 Obtain the original audio data file. - let dir = globalThis.fileDir; // You must use the sandbox path. - const path1 = dir + '/music001_48000_32_1.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/music001_48000_32_1.wav - console.info(`audioRender1 file path: ${ path1}`); - let file1 = fs.openSync(path1, fs.OpenMode.READ_ONLY); - let stat = await fs.stat(path1); // Music file information. - let buf = new ArrayBuffer(bufferSize); - let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); - - // 1.7 Render the original audio data in the buffer by using audioRender. - for (let i = 0;i < len; i++) { - let options = { - offset: i * this.bufferSize, - length: this.bufferSize - } - let readsize = await fs.read(file1.fd, buf, options) - let writeSize = await new Promise((resolve,reject)=>{ - this.audioRenderer1.write(buf,(err,writeSize)=>{ - if(err){ - reject(err) - }else{ - resolve(writeSize) - } - }) - }) - } - fs.close(file1) - await this.audioRenderer1.stop(); // Stop rendering. - await this.audioRenderer1.release(); // Release the resources. 
- } - - async runningAudioRender2(){ - let audioStreamInfo = { - samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, - channels: audio.AudioChannel.CHANNEL_1, - sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE, - encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW - } - let audioRendererInfo = { - content: audio.ContentType.CONTENT_TYPE_MUSIC, - usage: audio.StreamUsage.STREAM_USAGE_MEDIA, - rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0. - } - let audioRendererOptions = { - streamInfo: audioStreamInfo, - rendererInfo: audioRendererInfo - } - - // 2.1 Create another instance. - this.audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions); - console.info("Create audio renderer 2 success."); - - // 2.2 Set the independent mode. - this.audioRenderer2.setInterruptMode(1).then( data => { - console.info('audioRenderer2 setInterruptMode Success!'); - }).catch((err) => { - console.error(`audioRenderer2 setInterruptMode Fail: ${err}`); - }); - - // 2.3 Set the listener. - this.audioRenderer2.on('audioInterrupt', async(interruptEvent) => { - console.info(`audioRenderer2 on audioInterrupt : ${JSON.stringify(interruptEvent)}`) - }); - - // 2.4 Start rendering. - await this.audioRenderer2.start(); - console.info('startAudioRender2 success'); - - // 2.5 Obtain the buffer size. - const bufferSize = await this.audioRenderer2.getBufferSize(); - console.info(`audio bufferSize: ${bufferSize}`); - - // 2.6 Read the original audio data file. - let dir = globalThis.fileDir; // You must use the sandbox path. - const path2 = dir + '/music002_48000_32_1.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/music002_48000_32_1.wav - console.info(`audioRender2 file path: ${ path2}`); - let file2 = fs.openSync(path2, fs.OpenMode.READ_ONLY); - let stat = await fs.stat(path2); // Music file information. 
- let buf = new ArrayBuffer(bufferSize); - let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); - - // 2.7 Render the original audio data in the buffer by using audioRender. - for (let i = 0;i < len; i++) { - let options = { - offset: i * this.bufferSize, - length: this.bufferSize - } - let readsize = await fs.read(file2.fd, buf, options) - let writeSize = await new Promise((resolve,reject)=>{ - this.audioRenderer2.write(buf,(err,writeSize)=>{ - if(err){ - reject(err) - }else{ - resolve(writeSize) - } - }) - }) - } - fs.close(file2) - await this.audioRenderer2.stop(); // Stop rendering. - await this.audioRenderer2.release(); // Release the resources. - } - - // Integrated invoking entry. - async test(){ - await this.runningAudioRender1(); - await this.runningAudioRender2(); - } - - ``` \ No newline at end of file diff --git a/en/application-dev/media/audio-routing-manager.md b/en/application-dev/media/audio-routing-manager.md deleted file mode 100644 index 55febdca0fad968d946601fce4faed99bc148dd2..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-routing-manager.md +++ /dev/null @@ -1,111 +0,0 @@ -# Audio Routing and Device Management Development - -## Overview - -The **AudioRoutingManager** module provides APIs for audio routing and device management. You can use the APIs to obtain the current input and output audio devices, listen for connection status changes of audio devices, and activate communication devices. - -## Working Principles - -The figure below shows the common APIs provided by the **AudioRoutingManager** module. - -**Figure 1** Common APIs of AudioRoutingManager - -![en-us_image_audio_routing_manager](figures/en-us_image_audio_routing_manager.png) - -You can use these APIs to obtain the device list, subscribe to or unsubscribe from device connection status changes, activate communication devices, and obtain their activation status. 
For details, see [Audio Management](../reference/apis/js-apis-audio.md). - - -## How to Develop - -For details about the APIs, see [AudioRoutingManager in Audio Management](../reference/apis/js-apis-audio.md#audioroutingmanager9). - -1. Obtain an **AudioRoutingManager** instance. - - Before using an API in **AudioRoutingManager**, you must use **getRoutingManager()** to obtain an **AudioRoutingManager** instance. - - ```js - import audio from '@ohos.multimedia.audio'; - async loadAudioRoutingManager() { - var audioRoutingManager = await audio.getAudioManager().getRoutingManager(); - console.info('audioRoutingManager------create-------success.'); - } - - ``` - -2. (Optional) Obtain the device list and subscribe to device connection status changes. - - To obtain the device list (such as input, output, distributed input, and distributed output devices) or listen for connection status changes of audio devices, refer to the following code: - - ```js - import audio from '@ohos.multimedia.audio'; - // Obtain an AudioRoutingManager instance. - async loadAudioRoutingManager() { - var audioRoutingManager = await audio.getAudioManager().getRoutingManager(); - console.info('audioRoutingManager------create-------success.'); - } - // Obtain information about all audio devices. (You can set DeviceFlag as required.) - async getDevices() { - await loadAudioRoutingManager(); - await audioRoutingManager.getDevices(audio.DeviceFlag.ALL_DEVICES_FLAG).then((data) => { - console.info(`getDevices success and data is: ${JSON.stringify(data)}.`); - }); - } - // Subscribe to connection status changes of audio devices. 
- async onDeviceChange() { - await loadAudioRoutingManager(); - await audioRoutingManager.on('deviceChange', audio.DeviceFlag.ALL_DEVICES_FLAG, (deviceChanged) => { - console.info('on device change type : ' + deviceChanged.type); - console.info('on device descriptor size : ' + deviceChanged.deviceDescriptors.length); - console.info('on device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceRole); - console.info('on device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceType); - }); - } - // Unsubscribe from the connection status changes of audio devices. - async offDeviceChange() { - await loadAudioRoutingManager(); - await audioRoutingManager.off('deviceChange', (deviceChanged) => { - console.info('off device change type : ' + deviceChanged.type); - console.info('off device descriptor size : ' + deviceChanged.deviceDescriptors.length); - console.info('off device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceRole); - console.info('off device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceType); - }); - } - // Complete process: Call APIs to obtain all devices and subscribe to device changes, then manually change the connection status of a device (for example, wired headset), and finally call APIs to obtain all devices and unsubscribe from the device changes. - async test(){ - await getDevices(); - await onDeviceChange()(); - // Manually disconnect or connect devices. - await getDevices(); - await offDeviceChange(); - } - ``` - -3. (Optional) Activate a communication device and obtain its activation status. - - ```js - import audio from '@ohos.multimedia.audio'; - // Obtain an AudioRoutingManager instance. - async loadAudioRoutingManager() { - var audioRoutingManager = await audio.getAudioManager().getRoutingManager(); - console.info('audioRoutingManager------create-------success.'); - } - // Activate a communication device. 
- async setCommunicationDevice() { - await loadAudioRoutingManager(); - await audioRoutingManager.setCommunicationDevice(audio.CommunicationDeviceType.SPEAKER, true).then(() => { - console.info('setCommunicationDevice true is success.'); - }); - } - // Obtain the activation status of the communication device. - async isCommunicationDeviceActive() { - await loadAudioRoutingManager(); - await audioRoutingManager.isCommunicationDeviceActive(audio.CommunicationDeviceType.SPEAKER).then((value) => { - console.info(`CommunicationDevice state is: ${value}.`); - }); - } - // Complete process: Activate a device and obtain the activation status. - async test(){ - await setCommunicationDevice(); - await isCommunicationDeviceActive(); - } - ``` diff --git a/en/application-dev/media/audio-stream-manager.md b/en/application-dev/media/audio-stream-manager.md deleted file mode 100644 index 44ec37cd11f3666131214e5e908a1ce761fea111..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-stream-manager.md +++ /dev/null @@ -1,164 +0,0 @@ -# Audio Stream Management Development - -## Introduction - -You can use **AudioStreamManager** to manage audio streams. - -## Working Principles - -The following figure shows the calling relationship of **AudioStreamManager** APIs. - -**Figure 1** AudioStreamManager API calling relationship - -![en-us_image_audio_stream_manager](figures/en-us_image_audio_stream_manager.png) - -**NOTE**: During application development, use **getStreamManager()** to create an **AudioStreamManager** instance. Then, you can call **on('audioRendererChange')** or **on('audioCapturerChange')** to listen for status, client, and audio attribute changes of the audio playback or recording application. To cancel the listening for these changes, call **off('audioRendererChange')** or **off('audioCapturerChange')**. 
You can call **getCurrentAudioRendererInfoArray()** to obtain information about the audio playback application, such as the unique audio stream ID, UID of the audio playback client, and audio status. Similarly, you can call **getCurrentAudioCapturerInfoArray()** to obtain information about the audio recording application. - -## How to Develop - -For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9). - -1. Create an **AudioStreamManager** instance. - - Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance. - - ```js - var audioManager = audio.getAudioManager(); - var audioStreamManager = audioManager.getStreamManager(); - ``` - -2. (Optional) Call **on('audioRendererChange')** to listen for audio renderer changes. - - If an application needs to receive notifications when the audio playback application status, audio playback client, or audio attribute changes, it can subscribe to this event. For more events that can be subscribed to, see [Audio Management](../reference/apis/js-apis-audio.md). 
- - ```js - audioStreamManager.on('audioRendererChange', (AudioRendererChangeInfoArray) => { - for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) { - AudioRendererChangeInfo = AudioRendererChangeInfoArray[i]; - console.info('## RendererChange on is called for ' + i + ' ##'); - console.info('StreamId for ' + i + ' is:' + AudioRendererChangeInfo.streamId); - console.info('ClientUid for ' + i + ' is:' + AudioRendererChangeInfo.clientUid); - console.info('Content for ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.content); - console.info('Stream for ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.usage); - console.info('Flag ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.rendererFlags); - console.info('State for ' + i + ' is:' + AudioRendererChangeInfo.rendererState); - var devDescriptor = AudioRendererChangeInfo.deviceDescriptors; - for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) { - console.info('Id:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].id); - console.info('Type:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceType); - console.info('Role:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceRole); - console.info('Name:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].name); - console.info('Address:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].address); - console.info('SampleRates:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]); - console.info('ChannelCounts' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]); - console.info('ChannelMask:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelMasks); - } - } - }); - ``` - -3. (Optional) Call **off('audioRendererChange')** to cancel listening for audio renderer changes. - - ```js - audioStreamManager.off('audioRendererChange'); - console.info('######### RendererChange Off is called #########'); - ``` - -4. 
(Optional) Call **on('audioCapturerChange')** to listen for audio capturer changes. - - If an application needs to receive notifications when the audio recording application status, audio recording client, or audio attribute changes, it can subscribe to this event. For more events that can be subscribed to, see [Audio Management](../reference/apis/js-apis-audio.md). - - ```js - audioStreamManager.on('audioCapturerChange', (AudioCapturerChangeInfoArray) => { - for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) { - console.info(' ## audioCapturerChange on is called for element ' + i + ' ##'); - console.info('StreamId for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].streamId); - console.info('ClientUid for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].clientUid); - console.info('Source for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.source); - console.info('Flag ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags); - console.info('State for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerState); - for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) { - console.info('Id:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id); - console.info('Type:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType); - console.info('Role:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole); - console.info('Name:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name); - console.info('Address:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address); - console.info('SampleRates:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]); - console.info('ChannelCounts' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]); - console.info('ChannelMask:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks); - } - } - }); 
- ``` - -5. (Optional) Call **off('audioCapturerChange')** to cancel listening for audio capturer changes. - - ```js - audioStreamManager.off('audioCapturerChange'); - console.info('######### CapturerChange Off is called #########'); - ``` - -6. (Optional) Call **getCurrentAudioRendererInfoArray()** to obtain information about the current audio renderer. - - This API can be used to obtain the unique ID of the audio stream, UID of the audio playback client, audio status, and other information about the audio player. Before calling this API, a third-party application must have the **ohos.permission.USE_BLUETOOTH** permission configured, for the device name and device address to be displayed correctly. - - ```js - await audioStreamManager.getCurrentAudioRendererInfoArray().then( function (AudioRendererChangeInfoArray) { - console.info('######### Get Promise is called ##########'); - if (AudioRendererChangeInfoArray != null) { - for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) { - AudioRendererChangeInfo = AudioRendererChangeInfoArray[i]; - console.info('StreamId for ' + i +' is:' + AudioRendererChangeInfo.streamId); - console.info('ClientUid for ' + i + ' is:' + AudioRendererChangeInfo.clientUid); - console.info('Content ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.content); - console.info('Stream' + i +' is:' + AudioRendererChangeInfo.rendererInfo.usage); - console.info('Flag' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.rendererFlags); - console.info('State for ' + i + ' is:' + AudioRendererChangeInfo.rendererState); - var devDescriptor = AudioRendererChangeInfo.deviceDescriptors; - for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) { - console.info('Id:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].id); - console.info('Type:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceType); - console.info('Role:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceRole); - 
console.info('Name:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].name); - console.info('Address:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].address); - console.info('SampleRates:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]); - console.info('ChannelCounts' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]); - console.info('ChannelMask:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelMasks); - } - } - } - }).catch((err) => { - console.log('getCurrentAudioRendererInfoArray :ERROR: ' + err.message); - }); - ``` - -7. (Optional) Call **getCurrentAudioCapturerInfoArray()** to obtain information about the current audio capturer. - This API can be used to obtain the unique ID of the audio stream, UID of the audio recording client, audio status, and other information about the audio capturer. Before calling this API, a third-party application must have the **ohos.permission.USE_BLUETOOTH** permission configured, for the device name and device address to be displayed correctly. 
- - ```js - await audioStreamManager.getCurrentAudioCapturerInfoArray().then( function (AudioCapturerChangeInfoArray) { - console.info('getCurrentAudioCapturerInfoArray: **** Get Promise Called ****'); - if (AudioCapturerChangeInfoArray != null) { - for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) { - console.info('StreamId for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].streamId); - console.info('ClientUid for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].clientUid); - console.info('Source for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.source); - console.info('Flag ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags); - console.info('State for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerState); - var devDescriptor = AudioCapturerChangeInfoArray[i].deviceDescriptors; - for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) { - console.info('Id:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id); - console.info('Type:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType); - console.info('Role:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole); - console.info('Name:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name) - console.info('Address:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address); - console.info('SampleRates:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]); - console.info('ChannelCounts' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]); - console.info('ChannelMask:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks); - } - } - } - }).catch((err) => { - console.log('getCurrentAudioCapturerInfoArray :ERROR: ' + err.message); - }); - ``` diff --git a/en/application-dev/media/audio-volume-manager.md b/en/application-dev/media/audio-volume-manager.md 
deleted file mode 100644 index 28ed3dcbc8709609d092a96065a70996b4f487b5..0000000000000000000000000000000000000000 --- a/en/application-dev/media/audio-volume-manager.md +++ /dev/null @@ -1,126 +0,0 @@ -# Volume Management Development - -## Overview - -The **AudioVolumeManager** module provides APIs for volume management. You can use the APIs to obtain the volume of a stream, listen for ringer mode changes, and mute a microphone. - -## Working Principles - -The figure below shows the common APIs provided by the **AudioVolumeManager** module. - -**Figure 1** Common APIs of AudioVolumeManager - -![en-us_image_audio_volume_manager](figures/en-us_image_audio_volume_manager.png) - -**AudioVolumeManager** provides the APIs for subscribing to system volume changes and obtaining the audio volume group manager (an **AudioVolumeGroupManager** instance). Before calling any API in **AudioVolumeGroupManager**, you must call **getVolumeGroupManager** to obtain an **AudioVolumeGroupManager** instance. You can use the APIs provided by **AudioVolumeGroupManager** to obtain the volume of a stream, mute a microphone, and listen for microphone state changes. For details, see [Audio Management](../reference/apis/js-apis-audio.md). - -## Constraints - -Before developing a microphone management application, configure the permission **ohos.permission.MICROPHONE** for the application. To set the microphone state, configure the permission **ohos.permission.MANAGE_AUDIO_CONFIG** (a system permission). For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file). - -## How to Develop - -For details about the APIs, see [AudioVolumeManager in Audio Management](../reference/apis/js-apis-audio.md#audiovolumemanager9) - -1. Obtain an **AudioVolumeGroupManager** instance. - - Before using an API in **AudioVolumeGroupManager**, you must use **getVolumeGroupManager()** to obtain an **AudioStreamManager** instance. 
- - ```js - import audio from '@ohos.multimedia.audio'; - async loadVolumeGroupManager() { - const groupid = audio.DEFAULT_VOLUME_GROUP_ID; - var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid); - console.error('audioVolumeGroupManager create success.'); - } - - ``` - -2. (Optional) Obtain the volume information and ringer mode. - - To obtain the volume information of an audio stream (such as the ringtone, voice call, media, and voice assistant) or obtain the ringer mode (silent, vibration, or normal) of the current device, refer to the code below. For more details, see [Audio Management](../reference/apis/js-apis-audio.md). - - ```js - import audio from '@ohos.multimedia.audio'; - async loadVolumeGroupManager() { - const groupid = audio.DEFAULT_VOLUME_GROUP_ID; - var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid); - console.info('audioVolumeGroupManager create success.'); - } - - // Obtain the volume of a stream. The value ranges from 0 to 15. - async getVolume() { - await loadVolumeGroupManager(); - await audioVolumeGroupManager.getVolume(audio.AudioVolumeType.MEDIA).then((value) => { - console.info(`getVolume success and volume is: ${value}.`); - }); - } - // Obtain the minimum volume of a stream. - async getMinVolume() { - await loadVolumeGroupManager(); - await audioVolumeGroupManager.getMinVolume(audio.AudioVolumeType.MEDIA).then((value) => { - console.info(`getMinVolume success and volume is: ${value}.`); - }); - } - // Obtain the maximum volume of a stream. - async getMaxVolume() { - await loadVolumeGroupManager(); - await audioVolumeGroupManager.getMaxVolume(audio.AudioVolumeType.MEDIA).then((value) => { - console.info(`getMaxVolume success and volume is: ${value}.`); - }); - } - // Obtain the ringer mode in use: silent (0) | vibrate (1) | normal (2). 
- async getRingerMode() { - await loadVolumeGroupManager(); - await audioVolumeGroupManager.getRingerMode().then((value) => { - console.info(`getRingerMode success and RingerMode is: ${value}.`); - }); - } - ``` - -3. (Optional) Obtain and set the microphone state, and subscribe to microphone state changes. - - To obtain and set the microphone state or subscribe to microphone state changes, refer to the following code: - - ```js - import audio from '@ohos.multimedia.audio'; - async loadVolumeGroupManager() { - const groupid = audio.DEFAULT_VOLUME_GROUP_ID; - var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid); - console.info('audioVolumeGroupManager create success.'); - } - - async on() { // Subscribe to microphone state changes. - await loadVolumeGroupManager(); - await audioVolumeGroupManager.audioVolumeGroupManager.on('micStateChange', (micStateChange) => { - console.info(`Current microphone status is: ${micStateChange.mute} `); - }); - } - - async isMicrophoneMute() { // Check whether the microphone is muted. - await audioVolumeGroupManager.audioVolumeGroupManager.isMicrophoneMute().then((value) => { - console.info(`isMicrophoneMute is: ${value}.`); - }); - } - - async setMicrophoneMuteTrue() { // Mute the microphone. - await loadVolumeGroupManager(); - await audioVolumeGroupManager.audioVolumeGroupManager.setMicrophoneMute(true).then(() => { - console.info('setMicrophoneMute to mute.'); - }); - } - - async setMicrophoneMuteFalse() { // Unmute the microphone. - await loadVolumeGroupManager(); - await audioVolumeGroupManager.audioVolumeGroupManager.setMicrophoneMute(false).then(() => { - console.info('setMicrophoneMute to not mute.'); - }); - } - async test(){ // Complete process: Subscribe to microphone state changes, obtain the microphone state, mute the microphone, obtain the microphone state, and then unmute the microphone. 
- await on(); - await isMicrophoneMute(); - await setMicrophoneMuteTrue(); - await isMicrophoneMute(); - await setMicrophoneMuteFalse(); - } - ``` diff --git a/en/application-dev/media/av-overview.md b/en/application-dev/media/av-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..eb0ea76dbfa90a3d3e3dd13e98ecf40876714310 --- /dev/null +++ b/en/application-dev/media/av-overview.md @@ -0,0 +1,66 @@ +# Audio and Video Overview + +You will learn how to use the audio and video APIs provided by the multimedia subsystem to develop a wealth of audio and video playback or recording scenarios. For example, you can use the **TonePlayer** class to implement simple prompt tones so that a drip sound is played upon the receipt of a new message, or use the **AVPlayer** class to develop a music player, which can loop a piece of music. + +For every functionality provided by the multimedia subsystem, you will learn multiple implementation modes, each of which corresponds to a specific usage scenario. You will also learn the sub-functionalities in these scenarios. For example, in the **Audio Playback** chapter, you will learn audio concurrency policies, volume management, and output device processing methods. All these will help you develop an application with more comprehensive features. + +This development guide applies only to audio and video playback and recording, which are implemented by the [@ohos.multimedia.audio](../reference/apis/js-apis-audio.md) and [@ohos.multimedia.media](../reference/apis/js-apis-media.md) modules. The UI, image processing, media storage, or other related capabilities are not covered. + +## Development Description + +Before developing an audio feature, especially before implementing audio data processing, you are advised to understand the following acoustic concepts. 
This will help you understand how the OpenHarmony APIs control the audio module and how to develop audio and video applications that are easier to use and deliver better experience. + +- Audio quantization process: sampling > quantization > encoding + +- Concepts related to audio quantization: analog signal, digital signal, sampling rate, audio channel, sample format, bit width, bit rate, common encoding formats (such as AAC, MP3, PCM, and WMA), and common encapsulation formats (such as WAV, MPA, FLAC, AAC, and OGG) + +Before developing features related to audio and video playback, you are advised to understand the following concepts: + +- Playback process: network protocol > container format > audio and video codec > graphics/audio rendering +- Network protocols: HLS, HTTP, HTTPS, and more +- Container formats: MP4, MKV, MPEG-TS, WebM, and more +- Encoding formats: H.263/H.264/H.265, MPEG4/MPEG2, and more + +## Introduction to Audio Streams + +An audio stream is an independent audio data processing unit that has a specific audio format and audio usage scenario information. The audio stream can be used in playback and recording scenarios, and supports independent volume adjustment and audio device routing. + +The basic audio stream information is defined by [AudioStreamInfo](../reference/apis/js-apis-audio.md#audiostreaminfo8), which includes the sampling, audio channel, bit width, and encoding information. It describes the basic attributes of audio data and is mandatory for creating an audio playback or recording stream. To enable the audio module to correctly process audio data, the configured basic information must match the transmitted audio data. + +### Audio Stream Usage Scenario Information + +In addition to the basic information (which describes only audio data), an audio stream has usage scenario information. This is because audio streams differ in the volume, device routing, and concurrency policy. 
The system chooses an appropriate processing policy for an audio stream based on the usage scenario information, thereby delivering the optimal user experience. + +- Playback scenario + +Information about the audio playback scenario is defined by using [StreamUsage](../reference/apis/js-apis-audio.md#streamusage) and [ContentType](../reference/apis/js-apis-audio.md#contenttype). + +- **StreamUsage** specifies the usage type of an audio stream, for example, used for media, voice communication, voice assistant, notification, and ringtone. + +- **ContentType** specifies the content type of data in an audio stream, for example, speech, music, movie, notification tone, and ringtone. + +- Recording scenario + +Information about the audio stream recording scenario is defined by [SourceType](../reference/apis/js-apis-audio.md#sourcetype8). + + **SourceType** specifies the recording source type of an audio stream, including the mic source, voice recognition source, and voice communication source. + +## Supported Audio Formats + +The APIs of the audio module support PCM encoding, including AudioRenderer, AudioCapturer, TonePlayer, and OpenSL ES. + +Be familiar with the following about the audio format: + +- The common audio sampling rates are supported: 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, and 96000, in units of Hz. For details, see [AudioSamplingRate](../reference/apis/js-apis-audio.md#audiosamplingrate8). + +The sampling rate varies according to the device type. + +- Mono and stereo are supported. For details, see [AudioChannel](../reference/apis/js-apis-audio.md#audiochannel8). + +- The following sampling formats are supported: U8 (unsigned 8-bit integer), S16LE (signed 16-bit integer, little endian), S24LE (signed 24-bit integer, little endian), S32LE (signed 32-bit integer, little endian), and F32LE (signed 32-bit floating point number, little endian). 
For details, see [AudioSampleFormat](../reference/apis/js-apis-audio.md#audiosampleformat8). + +Due to system restrictions, only some devices support the sampling formats S24LE, S32LE, and F32LE. + + Little endian means that the most significant byte is stored at the largest memory address and the least significant byte of data is stored at the smallest. This storage mode effectively combines the memory address with the bit weight of the data. Specifically, the largest memory address has a high weight, and the smallest memory address has a low weight. + +The audio and video formats supported by the APIs of the media module are described in [AVPlayer and AVRecorder](avplayer-avrecorder-overview.md). diff --git a/en/application-dev/media/avplayer-avrecorder-overview.md b/en/application-dev/media/avplayer-avrecorder-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..051ca3b66ce1839046a2e783a8c274c304625045 --- /dev/null +++ b/en/application-dev/media/avplayer-avrecorder-overview.md @@ -0,0 +1,148 @@ +# AVPlayer and AVRecorder + +The media module provides the [AVPlayer](#avplayer) and [AVRecorder](#avrecorder) class to implement audio and video playback and recording. + +## AVPlayer + +The AVPlayer transcodes audio and video media assets (such as MP4, MP3, MKV, and MPEG-TS) into renderable images and hearable audio analog signals, and plays the audio and video through output devices. + +The AVPlayer provides the integrated playback capability. This means that your application only needs to provide streaming media sources to implement media playback. It does not need to parse or decode data. + + +### Audio Playback + +The figure below shows the interaction when the **AVPlayer** class is used to develop a music application. 
+ +**Figure 1** Interaction with external modules for audio playback + +![Audio playback interaction diagram](figures/audio-playback-interaction-diagram.png) + +When a music application calls the **AVPlayer** APIs at the JS interface layer to implement audio playback, the player framework at the framework layer parses the media asset into audio data streams (in PCM format). The audio data streams are then decoded by software and output to the audio framework. The audio framework outputs the audio data streams to the audio HDI for rendering. A complete audio playback process requires the cooperation of the application, player framework, audio framework, and audio HDI. + +In Figure 1, the numbers indicate the process where data is transferred to external modules. + +1. The music application transfers the media asset to the **AVPlayer** instance. + +2. The player framework outputs the audio PCM data streams to the audio framework, which then outputs the data streams to the audio HDI. + +### Video Playback + +The figure below shows the interaction when the **AVPlayer** class is used to develop a video application. + +**Figure 2** Interaction with external modules for video playback + +![Video playback interaction diagram](figures/video-playback-interaction-diagram.png) + +When the video application calls the **AVPlayer** APIs at the JS interface layer to implement audio and video playback, the player framework at the framework layer parses the media asset into separate audio data streams and video data streams. The audio data streams are then decoded by software and output to the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. The video data streams are then decoded by hardware (recommended) or software and output to the graphic framework. 
The graphic framework outputs the video data streams to the display HDI at the hardware interface layer to implement graphics rendering. + +A complete video playback process requires the cooperation of the application, XComponent, player framework, graphic framework, audio framework, display HDI, and audio HDI. + +In Figure 2, the numbers indicate the process where data is transferred to external modules. + +1. The application obtains a window surface ID from the XComponent. For details about how to obtain the window surface ID, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md). + +2. The application transfers the media asset and surface ID to the **AVPlayer** instance. + +3. The player framework outputs the video elementary streams (ESs) to the decoding HDI to obtain video frames (NV12/NV21/RGBA). + +4. The player framework outputs the audio PCM data streams to the audio framework, which then outputs the data streams to the audio HDI. + +5. The player framework outputs the video frames (NV12/NV21/RGBA) to the graphic framework, which then outputs the video frames to the display HDI. + +### Supported Formats and Protocols + +Audio and video containers and codecs are domains specific to content creators. You are advised to use the mainstream playback formats, rather than custom ones to avoid playback failures, frame freezing, and artifacts. The system will not be affected by incompatibility issues. If such an issue occurs, you can exit playback. + +The table below lists the supported protocols. + +| Scenario| Description| +| -------- | -------- | +| Local VOD| The file descriptor is supported, but the file path is not.| +| Network VoD| HTTP, HTTPS, and HLS are supported.| + +The table below lists the supported audio playback formats. 
+ +| Audio Container Format| Description| +| -------- | -------- | +| M4A| Audio format: AAC| +| AAC| Audio format: AAC| +| MP3| Audio format: MP3| +| OGG| Audio format: VORBIS | +| WAV| Audio format: PCM | + +> **NOTE** +> +> The supported video formats are further classified into mandatory and optional ones. All vendors must support mandatory ones and can determine whether to implement optional ones based on their service requirements. You are advised to perform compatibility processing to ensure that all the application functions are compatible on different platforms. + +| Video Format| Mandatory or Not| +| -------- | -------- | +| H.264 | Yes| +| MPEG-2 | No| +| MPEG-4 | No| +| H.263 | No| +| VP8 | No| + +The table below lists the supported playback formats and mainstream resolutions. + +| Video Container Format| Description| Resolution| +| -------- | -------- | -------- | +| MP4| Video formats: H.264, MPEG-2, MPEG-4, and H.263
Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p| +| MKV| Video formats: H.264, MPEG-2, MPEG-4, and H.263
Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p| +| TS| Video formats: H.264, MPEG-2, and MPEG-4
Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p| +| WebM| Video format: VP8
Audio format: VORBIS| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p| + +## AVRecorder + +The AVRecorder captures audio signals, receives video signals, encodes the audio and video signals, and saves them to files. With the AVRecorder, you can easily implement audio and video recording, including starting, pausing, resuming, and stopping recording, and releasing resources. You can also specify parameters such as the encoding format, encapsulation format, and file path for recording. + +**Figure 3** Interaction with external modules for video recording + +![Video recording interaction diagram](figures/video-recording-interaction-diagram.png) + +- Audio recording: When an application calls the **AVRecorder** APIs at the JS interface layer to implement audio recording, the player framework at the framework layer invokes the audio framework to capture audio data through the audio HDI. The audio data is then encoded by software and saved into a file. + +- Video recording: When an application calls the **AVRecorder** APIs at the JS interface layer to implement video recording, the camera framework is first invoked to capture image data. Through the video encoding HDI, the camera framework sends the data to the player framework at the framework layer. The player framework encodes the image data through the video HDI and saves the encoded image data into a file. + +With the AVRecorder, you can implement pure audio recording, pure video recording, and audio and video recording. + +In Figure 3, the numbers indicate the process where data is transferred to external modules. + +1. The application obtains a surface ID from the player framework through the **AVRecorder** instance. + +2. The application sets the surface ID for the camera framework, which obtains the surface corresponding to the surface ID. The camera framework captures image data through the video HDI and sends the data to the player framework at the framework layer. + +3. 
The camera framework transfers the video data to the player framework through the surface. + +4. The player framework encodes video data through the video HDI. + +5. The player framework sets the audio parameters for the audio framework and obtains the audio data from the audio framework. + +### Supported Formats + +The table below lists the supported audio sources. + +| Type| Description| +| -------- | -------- | +| mic | The system microphone is used as the audio source input.| + +The table below lists the supported video sources. + +| Type| Description | +| -------- | -------- | +| surface_yuv | The input surface carries raw data.| +| surface_es | The input surface carries ES data.| + +The table below lists the supported audio and video encoding formats. + +| Encoding Format| Description | +| -------- | -------- | +| audio/mp4a-latm | Audio encoding format MP4A-LATM.| +| video/mp4v-es | Video encoding format MPEG-4.| +| video/avc | Video encoding format AVC.| + +The table below lists the supported output file formats. + +| Format| Description | +| -------- | -------- | +| MP4| Video container format MP4.| +| M4A| Audio container format M4A.| diff --git a/en/application-dev/media/avplayer-playback.md b/en/application-dev/media/avplayer-playback.md deleted file mode 100644 index 9a7d9ffa10e2de83e676adbd2c5af7f9b3ba35af..0000000000000000000000000000000000000000 --- a/en/application-dev/media/avplayer-playback.md +++ /dev/null @@ -1,477 +0,0 @@ -# AVPlayer Development - -## Introduction - -The AVPlayer converts audio or video resources into audible analog signals or renderable images and plays the signals or images using output devices. You can manage playback tasks on the AVPlayer. For example, you can control the playback (start/pause/stop/seek), set the volume, obtain track information, and release resources. 
- -## Working Principles - -The following figures show the [AVPlayer state](../reference/apis/js-apis-media.md#avplayerstate9) transition and interaction with external audio and video playback modules. - -**Figure 1** AVPlayer state transition - -![en-us_image_avplayer_state_machine](figures/en-us_image_avplayer_state_machine.png) - -**Figure 2** Interaction with external modules for audio playback - -![en-us_image_avplayer_audio](figures/en-us_image_avplayer_audio.png) - -**NOTE**: When an application calls the **AVPlayer** JS APIs at the JS interface layer to implement a feature, the framework layer parses the resources into audio data streams through the playback service of the player framework. The audio data streams are then decoded by software and output to the audio service of the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. A complete audio playback process requires the cooperation of the application (application adaptation required), player framework, audio framework, and audio HDI (driver adaptation required). - -1. An application passes a URL into the **AVPlayer** JS API. -2. The playback service outputs the audio PCM data streams to the audio service, and the audio service outputs the data streams to the audio HDI. - - -**Figure 3** Interaction with external modules for video playback - -![en-us_image_avplayer_video](figures/en-us_image_avplayer_video.png) - -**NOTE**: When an application calls the **AVPlayer** JS APIs at the JS interface layer to implement a feature, the framework layer parses the resources into separate audio data streams and video data streams through the playback service of the player framework. The audio data streams are then decoded by software and output to the audio service of the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. 
The video data streams are then decoded by hardware (recommended) or software and output to the renderer service of the graphic framework. The renderer service outputs the video data streams to the display HDI at the hardware interface layer. A complete video playback process requires the cooperation of the application (application adaptation required), XComponent, player framework, graphic framework, audio framework, display HDI (driver adaptation required), and audio HDI (driver adaptation required). - -1. An application obtains the surface ID from the XComponent. For details about the obtaining method, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md). -2. The application passes a URL and the surface ID into the **AVPlayer** JS API. -3. The playback service outputs video elementary streams (ESs) to the codec HDI, which decodes the ESs to obtain video frames (NV12/NV21/RGBA). -4. The playback service outputs the audio PCM data streams to the audio service, and the audio service outputs the data streams to the audio HDI. -5. The playback service outputs video frames (NV12/NV21/RGBA) to the renderer service, and the renderer service outputs the video frames to the display HDI. - -## Compatibility - -Use the mainstream playback formats and resolutions, rather than custom ones to avoid playback failures, frame freezing, and artifacts. The system will not be affected by incompatibility issues. If such an issue occurs, you can exit stream playback. - -The table below lists the mainstream playback formats and resolutions. 
- -| Video Container Format| Description | Resolution | -| :----------: | :-----------------------------------------------: | :--------------------------------: | -| mp4 | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| mkv | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| ts | Video format: H.264/MPEG-2/MPEG-4; audio format: AAC/MP3 | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| webm | Video format: VP8; audio format: VORBIS | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| - -| Audio Container Format | Description | -| :----------: | :----------: | -| m4a | Audio format: AAC| -| aac | Audio format: AAC| -| mp3 | Audio format: MP3| -| ogg | Audio format: VORBIS | -| wav | Audio format: PCM | - -## How to Develop - -For details about the APIs, see the [AVPlayer APIs in the Media Class](../reference/apis/js-apis-media.md#avplayer9). - -### Full-Process Scenario - -The full playback process includes creating an instance, setting resources, setting a video window, preparing for playback, controlling playback, and resetting or releasing the resources. (During the preparation, you can obtain track information, volume, speed, focus mode, and zoom mode, and set bit rates. To control the playback, you can start, pause, and stop the playback, seek to a playback position, and set the volume.) - -1. Call [createAVPlayer()](../reference/apis/js-apis-media.md#mediacreateavplayer9) to create an **AVPlayer** instance. The AVPlayer is initialized to the [idle](#avplayer_state) state. - -2. Set the events to listen for, which will be used in the full-process scenario. - -3. Set the resource [URL](../reference/apis/js-apis-media.md#avplayer_attributes). 
When the AVPlayer enters the [initialized](#avplayer_state) state, you can set the [surface ID](../reference/apis/js-apis-media.md#avplayer_attributes) for the video window. For details about the supported specifications, see [AVPlayer Attributes](../reference/apis/js-apis-media.md#avplayer_attributes). - -4. Call [prepare()](../reference/apis/js-apis-media.md#avplayer_prepare) to switch the AVPlayer to the [prepared](#avplayer_state) state. - -5. Perform video playback control. For example, you can call [play()](../reference/apis/js-apis-media.md#avplayer_play), [pause()](../reference/apis/js-apis-media.md#avplayer_pause), [seek()](../reference/apis/js-apis-media.md#avplayer_seek), and [stop()](../reference/apis/js-apis-media.md#avplayer_stop) to control the playback. - -6. Call [reset()](../reference/apis/js-apis-media.md#avplayer_reset) to reset resources. The AVPlayer enters the [idle](#avplayer_state) state again, and you can change the resource [URL](../reference/apis/js-apis-media.md#avplayer_attributes). - -7. Call [release()](../reference/apis/js-apis-media.md#avplayer_release) to release the instance. The AVPlayer enters the [released](#avplayer_state) state and exits the playback. - -> **NOTE** -> -> When the AVPlayer is in the prepared, playing, paused, or completed state, the playback engine is working and a large amount of system running memory is occupied. If your application does not need to use the AVPlayer, call **reset()** or **release()** to release the resources. - -### Listening Events - -| Event Type | Description | -| ------------------------------------------------- | ------------------------------------------------------------ | -| stateChange | Mandatory; used to listen for player state changes. | -| error | Mandatory; used to listen for player error information. | -| durationUpdate | Used to listen for progress bar updates to refresh the resource duration. 
| -| timeUpdate | Used to listen for the current position of the progress bar to refresh the current time. | -| seekDone | Used to listen for the completion status of the **seek()** request. | -| speedDone | Used to listen for the completion status of the **setSpeed()** request. | -| volumeChange | Used to listen for the completion status of the **setVolume()** request. | -| bitrateDone | Used to listen for the completion status of the **setBitrate()** request, which is used for HTTP Live Streaming (HLS) streams. | -| availableBitrates | Used to listen for available bit rates of HLS resources. The available bit rates are provided for **setBitrate()**. | -| bufferingUpdate | Used to listen for network playback buffer information. | -| startRenderFrame | Used to listen for the rendering time of the first frame during video playback. | -| videoSizeChange | Used to listen for the width and height of video playback and adjust the window size and ratio.| -| audioInterrupt | Used to listen for audio interruption during video playback. This event is used together with the **audioInterruptMode** attribute.| - -### Full-Process Scenario API Example - -```js -import media from '@ohos.multimedia.media' -import audio from '@ohos.multimedia.audio'; -import fs from '@ohos.file.fs' - -const TAG = 'AVPlayerDemo:' -export class AVPlayerDemo { - private count:number = 0 - private avPlayer - private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. - - // Set AVPlayer callback functions. - setAVPlayerCallback() { - // Callback function for state changes. - this.avPlayer.on('stateChange', async (state, reason) => { - switch (state) { - case 'idle': // This state is reported upon a successful callback of reset(). - console.info(TAG + 'state idle called') - this.avPlayer.release() // Release the AVPlayer instance. - break; - case 'initialized': // This state is reported when the AVPlayer sets the playback source. 
- console.info(TAG + 'state initialized called ') - this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played. - this.avPlayer.prepare().then(() => { - console.info(TAG+ 'prepare success'); - }, (err) => { - console.error(TAG + 'prepare filed,error message is :' + err.message) - }) - break; - case 'prepared': // This state is reported upon a successful callback of prepare(). - console.info(TAG + 'state prepared called') - this.avPlayer.play() // Call play() to start playback. - break; - case 'playing': // This state is reported upon a successful callback of play(). - console.info(TAG + 'state playing called') - if (this.count == 0) { - this.avPlayer.pause() // Call pause() to pause the playback. - } else { - this.avPlayer.seek(10000, media.SeekMode.SEEK_PREV_SYNC) // Seek to 10 seconds. The seekDone callback is triggered. - } - break; - case 'paused': // This state is reported upon a successful callback of pause(). - console.info(TAG + 'state paused called') - if (this.count == 0) { - this.count++ - this.avPlayer.play() // Call play() to continue the playback. - } - break; - case 'completed': // This state is reported upon the completion of the playback. - console.info(TAG + 'state completed called') - this.avPlayer.stop() // Call stop() to stop the playback. - break; - case 'stopped': // This state is reported upon a successful callback of stop(). - console.info(TAG + 'state stopped called') - this.avPlayer.reset() // Call reset() to initialize the AVPlayer state. - break; - case 'released': - console.info(TAG + 'state released called') - break; - case 'error': - console.info(TAG + 'state error called') - break; - default: - console.info(TAG + 'unkown state :' + state) - break; - } - }) - // Callback function for time updates. 
- this.avPlayer.on('timeUpdate', (time:number) => { - console.info(TAG + 'timeUpdate success,and new time is :' + time) - }) - // Callback function for volume updates. - this.avPlayer.on('volumeChange', (vol:number) => { - console.info(TAG + 'volumeChange success,and new volume is :' + vol) - this.avPlayer.setSpeed(media.AVPlayerSpeed.SPEED_FORWARD_2_00_X) // Double the playback speed. The speedDone callback is triggered. - }) - // Callback function for the video playback completion event. - this.avPlayer.on('endOfStream', () => { - console.info(TAG + 'endOfStream success') - }) - // Callback function for the seek operation. - this.avPlayer.on('seekDone', (seekDoneTime:number) => { - console.info(TAG + 'seekDone success,and seek time is:' + seekDoneTime) - this.avPlayer.setVolume(0.5) // Set the volume to 0.5. The volumeChange callback is triggered. - }) - // Callback function for the speed setting operation. - this.avPlayer.on('speedDone', (speed:number) => { - console.info(TAG + 'speedDone success,and speed value is:' + speed) - }) - // Callback function for successful bit rate setting. - this.avPlayer.on('bitrateDone', (bitrate:number) => { - console.info(TAG + 'bitrateDone success,and bitrate value is:' + bitrate) - }) - // Callback function for buffering updates. - this.avPlayer.on('bufferingUpdate', (infoType: media.BufferingInfoType, value: number) => { - console.info(TAG + 'bufferingUpdate success,and infoType value is:' + infoType + ', value is :' + value) - }) - // Callback function invoked when frame rendering starts. - this.avPlayer.on('startRenderFrame', () => { - console.info(TAG + 'startRenderFrame success') - }) - // Callback function for video width and height changes. - this.avPlayer.on('videoSizeChange', (width: number, height: number) => { - console.info(TAG + 'videoSizeChange success,and width is:' + width + ', height is :' + height) - }) - // Callback function for the audio interruption event. 
- this.avPlayer.on('audioInterrupt', (info: audio.InterruptEvent) => { - console.info(TAG + 'audioInterrupt success,and InterruptEvent info is:' + info) - }) - // Callback function to report the available bit rates of HLS. - this.avPlayer.on('availableBitrates', (bitrates: Array) => { - console.info(TAG + 'availableBitrates success,and availableBitrates length is:' + bitrates.length) - }) - } - - async avPlayerDemo() { - // Create an AVPlayer instance. - this.avPlayer = await media.createAVPlayer() - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command. - let path = pathDir + '/H264_AAC.mp4' - let file = await fs.open(path) - fdPath = fdPath + '' + file.fd - this.avPlayer.url = fdPath - } -} -``` - -### Normal Playback Scenario - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -const TAG = 'AVPlayerDemo:' -export class AVPlayerDemo { - private avPlayer - private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. - - // Set AVPlayer callback functions. - setAVPlayerCallback() { - // Callback function for state changes. - this.avPlayer.on('stateChange', async (state, reason) => { - switch (state) { - case 'idle': // This state is reported upon a successful callback of reset(). - console.info(TAG + 'state idle called') - break; - case 'initialized': // This state is reported when the AVPlayer sets the playback source. - console.info(TAG + 'state initialized called ') - this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played. 
- this.avPlayer.prepare().then(() => { - console.info(TAG+ 'prepare success'); - }, (err) => { - console.error(TAG + 'prepare filed,error message is :' + err.message) - }) - break; - case 'prepared': // This state is reported upon a successful callback of prepare(). - console.info(TAG + 'state prepared called') - this.avPlayer.play() // Call play() to start playback. - break; - case 'playing': // This state is reported upon a successful callback of play(). - console.info(TAG + 'state playing called') - break; - case 'paused': // This state is reported upon a successful callback of pause(). - console.info(TAG + 'state paused called') - break; - case 'completed': // This state is reported upon the completion of the playback. - console.info(TAG + 'state completed called') - this.avPlayer.stop() // Call stop() to stop the playback. - break; - case 'stopped': // This state is reported upon a successful callback of stop(). - console.info(TAG + 'state stopped called') - this.avPlayer.release() // Call reset() to initialize the AVPlayer state. - break; - case 'released': - console.info(TAG + 'state released called') - break; - case 'error': - console.info(TAG + 'state error called') - break; - default: - console.info(TAG + 'unkown state :' + state) - break; - } - }) - } - - async avPlayerDemo() { - // Create an AVPlayer instance. - this.avPlayer = await media.createAVPlayer() - let fileDescriptor = undefined - // Use getRawFileDescriptor of the resource management module to obtain the media assets in the application, and use the fdSrc attribute of the AVPlayer to initialize the media assets. - // For details on the fd/offset/length parameter, see the Media API. The globalThis.abilityContext parameter is a system environment variable and is saved as a global variable on the main page during the system boost. 
- await globalThis.abilityContext.resourceManager.getRawFileDescriptor('H264_AAC.mp4').then((value) => { - fileDescriptor = {fd: value.fd, offset: value.offset, length: value.length} - }) - this.avPlayer.fdSrc = fileDescriptor - } -} -``` - -### Looping a Song - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -const TAG = 'AVPlayerDemo:' -export class AVPlayerDemo { - private count:number = 0 - private avPlayer - private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. - - // Set AVPlayer callback functions. - setAVPlayerCallback() { - // Callback function for state changes. - this.avPlayer.on('stateChange', async (state, reason) => { - switch (state) { - case 'idle': // This state is reported upon a successful callback of reset(). - console.info(TAG + 'state idle called') - break; - case 'initialized': // This state is reported when the AVPlayer sets the playback source. - console.info(TAG + 'state initialized called ') - this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played. - this.avPlayer.prepare().then(() => { - console.info(TAG+ 'prepare success'); - }, (err) => { - console.error(TAG + 'prepare filed,error message is :' + err.message) - }) - break; - case 'prepared': // This state is reported upon a successful callback of prepare(). - console.info(TAG + 'state prepared called') - this.avPlayer.loop = true // Set the AVPlayer to loop a single item. The endOfStream callback is triggered when the previous round of the playback is complete. - this.avPlayer.play() // Call play() to start playback. - break; - case 'playing': // This state is reported upon a successful callback of play(). - console.info(TAG + 'state playing called') - break; - case 'paused': // This state is reported upon a successful callback of pause(). 
- console.info(TAG + 'state paused called') - break; - case 'completed': // This state is reported upon the completion of the playback. - console.info(TAG + 'state completed called') - // Cancel the loop playback when the endOfStream callback is triggered for the second time. The completed state is reported when the next round of the playback is complete. - this.avPlayer.stop() // Call stop() to stop the playback. - break; - case 'stopped': // This state is reported upon a successful callback of stop(). - console.info(TAG + 'state stopped called') - this.avPlayer.release() // Call reset() to initialize the AVPlayer state. - break; - case 'released': - console.info(TAG + 'state released called') - break; - case 'error': - console.info(TAG + 'state error called') - break; - default: - console.info(TAG + 'unkown state :' + state) - break; - } - }) - // Callback function for the video playback completion event. - this.avPlayer.on('endOfStream', () => { - console.info(TAG + 'endOfStream success') - if (this.count == 1) { - this.avPlayer.loop = false // Cancel loop playback. - } else { - this.count++ - } - }) - } - - async avPlayerDemo() { - // Create an AVPlayer instance. - this.avPlayer = await media.createAVPlayer() - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command. 
- let path = pathDir + '/H264_AAC.mp4' - let file = await fs.open(path) - fdPath = fdPath + '' + file.fd - this.avPlayer.url = fdPath - } -} -``` -### Switching to the Next Video Clip - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' - -const TAG = 'AVPlayerDemo:' -export class AVPlayerDemo { - private count:number = 0 - private avPlayer - private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. - - async nextVideo() { - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_MP3.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command. - let path = pathDir + '/H264_MP3.mp4' - let file = await fs.open(path) - fdPath = fdPath + '' + file.fd - this.avPlayer.url = fdPath // The initialized state is reported again. - } - - // Set AVPlayer callback functions. - setAVPlayerCallback() { - // Callback function for state changes. - this.avPlayer.on('stateChange', async (state, reason) => { - switch (state) { - case 'idle': // This state is reported upon a successful callback of reset(). - console.info(TAG + 'state idle called') - await this.nextVideo() // Switch to the next video. - break; - case 'initialized': // This state is reported when the AVPlayer sets the playback source. - console.info(TAG + 'state initialized called ') - this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played. 
- this.avPlayer.prepare().then(() => { - console.info(TAG+ 'prepare success'); - }, (err) => { - console.error(TAG + 'prepare filed,error message is :' + err.message) - }) - break; - case 'prepared': // This state is reported upon a successful callback of prepare(). - console.info(TAG + 'state prepared called') - this.avPlayer.play() // Call play() to start playback. - break; - case 'playing': // This state is reported upon a successful callback of play(). - console.info(TAG + 'state playing called') - break; - case 'paused': // This state is reported upon a successful callback of pause(). - console.info(TAG + 'state paused called') - break; - case 'completed': // This state is reported upon the completion of the playback. - console.info(TAG + 'state completed called') - if (this.count == 0) { - this.count++ - this.avPlayer.reset() // Call reset() to prepare for switching to the next video. - } else { - this.avPlayer.release() // Release the AVPlayer instance when the new video finishes playing. - } - break; - case 'stopped': // This state is reported upon a successful callback of stop(). - console.info(TAG + 'state stopped called') - break; - case 'released': - console.info(TAG + 'state released called') - break; - case 'error': - console.info(TAG + 'state error called') - break; - default: - console.info(TAG + 'unkown state :' + state) - break; - } - }) - } - - async avPlayerDemo() { - // Create an AVPlayer instance. - this.avPlayer = await media.createAVPlayer() - let fdPath = 'fd://' - let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements. - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command. 
- let path = pathDir + '/H264_AAC.mp4' - let file = await fs.open(path) - fdPath = fdPath + '' + file.fd - this.avPlayer.url = fdPath - } -} -``` diff --git a/en/application-dev/media/avrecorder.md b/en/application-dev/media/avrecorder.md deleted file mode 100644 index 9214df032d7d060cabe9900e8a0d5ab6e7aa12f9..0000000000000000000000000000000000000000 --- a/en/application-dev/media/avrecorder.md +++ /dev/null @@ -1,488 +0,0 @@ -# AVRecorder Development - -## Introduction - -The AVRecorder captures audio signals, receives video signals, encodes audio and video signals, and saves them to files. With the AVRecorder, you can easily implement audio and video recording, including starting, pausing, resuming, and stopping recording, and releasing resources. You can also specify parameters such as the encoding format, encapsulation format, and file path for recording. - -## Working Principles - -The following figures show the AVRecorder state transition and the interaction with external modules for audio and video recording. - -**Figure 1** AVRecorder state transition - -![en-us_image_video_recorder_state_machine](figures/en-us_image_avrecorder_state_machine.png) - -**Figure 2** Interaction between external modules for audio and video recording - -![en-us_image_video_recorder_zero](figures/en-us_image_avrecorder_module_interaction.png) - -**NOTE**: During audio recording, the framework layer calls the audio subsystem through the media service of the native framework to capture audio data through the audio HDI, encodes and encapsulates the data by using software, and saves the data to a file. During video recording, the camera subsystem captures image data through the video HDI. The media service encodes the image data through the video encoding HDI and encapsulates the encoded image data into a file. With the AVRecorder, you can implement pure audio recording, pure video recording, and audio and video recording. 
- -## Constraints - -Before developing the recording feature, configure permissions for your application. If audio recording is involved, obtain the permission **ohos.permission.MICROPHONE** by following the instructions provided in [Permission Application Guide](../security/accesstoken-guidelines.md). - -To use the camera to record videos, the camera module is required. For details about how to use the APIs and obtain permissions, see [Camera Management](../reference/apis/js-apis-camera.md). - -## How to Develop - -For details about the AVRecorder APIs, see the [AVRecorder APIs in the Media Class](../reference/apis/js-apis-media.md#avrecorder9). - -For details about the processes related to the media library, see [Media Library Management](../reference/apis/js-apis-medialibrary.md). - -For details about the camera-related process, see [Camera Management](../reference/apis/js-apis-camera.md). - -### Full-Process Scenario of Audio and Video Recording - -The full audio and video recording process includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources. - -The value range that can be set for the audio recording parameters is restricted by the codec performance of the device and the performance of the audio subsystem. - -The video range that can be set for the video recording parameters is restricted by the codec performance of the device and the performance of the camera subsystem. - -``` -import media from '@ohos.multimedia.media' -import camera from '@ohos.multimedia.camera' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' - -export class AVRecorderDemo { - private testFdNumber; // Used to save the File Descriptor (FD) address. - - // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. 
To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(fileName) { - // For details about the implementation mode, see the media library documentation. - this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54 - } - - // Error callback triggered in the case of an error in the promise mode. - failureCallback(error) { - console.info('error happened, error message is ' + error.message); - } - - // Error callback triggered in the case of an exception in the promise mode. - catchCallback(error) { - console.info('catch error happened, error message is ' + error.message); - } - - async AVRecorderDemo() { - let AVRecorder; // Assign a value to the empty AVRecorder instance upon a successful call of createAVRecorder(). - let surfaceID; // The surface ID is obtained by calling getInputSurface and transferred to the videoOutput object of the camera. - await this.getFd('01.mp4'); - - // Configure the parameters related to audio and video recording based on those supported by the hardware device. - let avProfile = { - audioBitrate : 48000, - audioChannels : 2, - audioCodec : media.CodecMimeType.AUDIO_AAC, - audioSampleRate : 48000, - fileFormat : media.ContainerFormatType.CFT_MPEG_4, - videoBitrate : 2000000, - videoCodec : media.CodecMimeType.VIDEO_MPEG4, - videoFrameWidth : 640, - videoFrameHeight : 480, - videoFrameRate : 30 - } - let avConfig = { - audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, - videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, - profile : avProfile, - url : 'fd://', - rotation : 0, - location : { latitude : 30, longitude : 130 } - } - - // Create an AVRecorder instance. 
- await media.createAVRecorder().then((recorder) => { - console.info('case createAVRecorder called'); - if (typeof (recorder) != 'undefined') { - AVRecorder = recorder; - console.info('createAVRecorder success'); - } else { - console.info('createAVRecorder failed'); - } - }, this.failureCallback).catch(this.catchCallback); - - // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors. - AVRecorder.on('stateChange', async (state, reason) => { - console.info('case state has changed, new state is :' + state); - switch (state) { - // Your can set the desired behavior in different states as required. - case 'idle': - // This state is reported upon a successful call of rest() or create(). - break; - case 'prepared': - // This state is reported upon a successful call of prepare(). - break; - case 'started': - // This state is reported upon a successful call of start(). - break; - case 'paused': - // This state is reported upon a successful call of pause(). - break; - case 'stopped': - // This state is reported upon a successful call of stop(). - break; - case 'released': - // This state is reported upon a successful call of release(). - break; - case 'error': - // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create an AVRecorder instance again. - break; - default: - console.info('case state is unknown'); - } - }); - AVRecorder.on('error', (err) => { - // Listen for non-interface errors. - console.info('case avRecorder.on(error) called, errMessage is ' + err.message); - }); - - // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare(). 
- await AVRecorder.prepare(avConfig).then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // If video recording is involved, call getInputSurface to obtain the input surface and pass the returned surface ID to the related camera API. - await AVRecorder.getInputSurface().then((surface) => { - console.info('getInputSurface success'); - surfaceID = surface; // The surfaceID is passed into createVideoOutput() of the camera as an input parameter. - }, this.failureCallback).catch(this.catchCallback); - - // Video recording depends on camera-related APIs. The following operations can be performed only after the video output start API is invoked. - // Start video recording. - await AVRecorder.start().then(() => { - console.info('start success'); - }, this.failureCallback).catch(this.catchCallback); - - // Pause video recording before the video output stop API of the camera is invoked. - await AVRecorder.pause().then(() => { - console.info('pause success'); - }, this.failureCallback).catch(this.catchCallback); - - // Resume video recording after the video output start API of the camera is invoked. - await AVRecorder.resume().then(() => { - console.info('resume success'); - }, this.failureCallback).catch(this.catchCallback); - - // Stop video recording after the video output stop API of the camera is invoked. - await AVRecorder.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Reset the recording configuration. - await AVRecorder.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off(). - AVRecorder.off('stateChange'); - AVRecorder.off('error'); - - // Release the video recording resources and camera object resources. 
- await AVRecorder.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the AVRecorder instance to null. - AVRecorder = undefined; - surfaceID = undefined; - } -} -``` - -### Full-Process Scenario of Pure Audio Recording - -The full audio recording process includes creating an instance, setting recording parameters, starting, pausing, resuming, and stopping recording, and releasing resources. - -The value range that can be set for the audio recording parameters is restricted by the codec performance of the device and the performance of the audio subsystem. - -``` -import media from '@ohos.multimedia.media' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' - -export class AudioRecorderDemo { - private testFdNumber; // Used to save the FD address. - - // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(fileName) { - // For details about the implementation mode, see the media library documentation. - this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54 - } - - // Error callback triggered in the case of an error in the promise mode. - failureCallback(error) { - console.info('error happened, error message is ' + error.message); - } - - // Error callback triggered in the case of an exception in the promise mode. - catchCallback(error) { - console.info('catch error happened, error message is ' + error.message); - } - - async audioRecorderDemo() { - let audioRecorder; // Assign a value to the empty AudioRecorder instance upon a successful call of createAVRecorder(). - await this.getFd('01.m4a'); - // Configure the parameters related to audio recording. 
- let audioProfile = { - audioBitrate : 48000, - audioChannels : 2, - audioCodec : media.CodecMimeType.AUDIO_AAC, - audioSampleRate : 48000, - fileFormat : media.ContainerFormatType.CFT_MPEG_4, - } - let audioConfig = { - audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, - profile : audioProfile, - url : this.testFdNumber, - rotation : 0, - location : { latitude : 30, longitude : 130 } - } - - // Create an AudioRecorder instance. - await media.createAVRecorder().then((recorder) => { - console.info('case createAVRecorder called'); - if (typeof (recorder) != 'undefined') { - audioRecorder = recorder; - console.info('createAudioRecorder success'); - } else { - console.info('createAudioRecorder failed'); - } - }, this.failureCallback).catch(this.catchCallback); - - // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors. - audioRecorder.on('stateChange', async (state, reason) => { - console.info('case state has changed, new state is :' + state); - switch (state) { - // Your can set the desired behavior in different states as required. - case 'idle': - // This state is reported upon a successful call of rest() or create(). - break; - case 'prepared': - // This state is reported upon a successful call of prepare(). - break; - case 'started': - // This state is reported upon a successful call of start(). - break; - case 'paused': - // This state is reported upon a successful call of pause(). - break; - case 'stopped': - // This state is reported upon a successful call of stop(). - break; - case 'released': - // This state is reported upon a successful call of release(). - break; - case 'error': - // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create an AudioRecorder instance again. - break; - default: - console.info('case state is unknown'); - } - }); - audioRecorder.on('error', (err) => { - // Listen for non-interface errors. 
- console.info('case avRecorder.on(error) called, errMessage is ' + err.message); - }); - - // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare(). - await audioRecorder.prepare(audioConfig).then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call start() to start audio recording. - await audioRecorder.start().then(() => { - console.info('start success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call pause() to pause audio recording. - await audioRecorder.pause().then(() => { - console.info('pause success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call resume() to resume audio recording. - await audioRecorder.resume().then(() => { - console.info('resume success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call stop() to stop audio recording. - await audioRecorder.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call reset() to reset the recording configuration. - await audioRecorder.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off(). - avRecorder.off('stateChange'); - avRecorder.off('error'); - - // Call release() to release audio recording resources. - await audioRecorder.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the AudioRecorder instance to null. 
- audioRecorder = undefined; - } -} - -``` - -### Full-Process Scenario of Pure Video Recording - -The full video recording process includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources. - -The video range that can be set for the video recording parameters is restricted by the codec performance of the device and the performance of the camera subsystem. - -``` -import media from '@ohos.multimedia.media' -import camera from '@ohos.multimedia.camera' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' - -export class VideoRecorderDemo { - private testFdNumber; // Used to save the FD address. - - // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(fileName) { - // For details about the implementation mode, see the media library documentation. - this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54 - } - - // Error callback triggered in the case of an error in the promise mode. - failureCallback(error) { - console.info('error happened, error message is ' + error.message); - } - - // Error callback triggered in the case of an exception in the promise mode. - catchCallback(error) { - console.info('catch error happened, error message is ' + error.message); - } - - async videoRecorderDemo() { - let videoRecorder; // Assign a value to the empty VideoRecorder instance upon a successful call of createAVRecorder(). - let surfaceID; // The surface ID is obtained by calling getInputSurface and transferred to the videoOutput object of the camera. - await this.getFd('01.mp4'); - - // Configure the parameters related to pure video recording based on those supported by the hardware device. 
- let videoProfile = { - fileFormat : media.ContainerFormatType.CFT_MPEG_4, - videoBitrate : 2000000, - videoCodec : media.CodecMimeType.VIDEO_MPEG4, - videoFrameWidth : 640, - videoFrameHeight : 480, - videoFrameRate : 30 - } - let videoConfig = { - videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, - profile : videoProfile, - url : 'fd://', - rotation : 0, - location : { latitude : 30, longitude : 130 } - } - - // Create a VideoRecorder instance. - await media.createAVRecorder().then((recorder) => { - console.info('case createVideoRecorder called'); - if (typeof (recorder) != 'undefined') { - videoRecorder = recorder; - console.info('createVideoRecorder success'); - } else { - console.info('createVideoRecorder failed'); - } - }, this.failureCallback).catch(this.catchCallback); - - // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors. - videoRecorder.on('stateChange', async (state, reason) => { - console.info('case state has changed, new state is :' + state); - switch (state) { - // Your can set the desired behavior in different states as required. - case 'idle': - // This state is reported upon a successful call of rest() or create(). - break; - case 'prepared': - // This state is reported upon a successful call of prepare(). - break; - case 'started': - // This state is reported upon a successful call of start(). - break; - case 'paused': - // This state is reported upon a successful call of pause(). - break; - case 'stopped': - // This state is reported upon a successful call of stop(). - break; - case 'released': - // This state is reported upon a successful call of release(). - break; - case 'error': - // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create a VideoRecorder instance again. 
- break; - default: - console.info('case state is unknown'); - } - }); - videoRecorder.on('error', (err) => { - // Listen for non-interface errors. - console.info('case avRecorder.on(error) called, errMessage is ' + err.message); - }); - - // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare(). - await videoRecorder.prepare(videoConfig).then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // If video recording is involved, call getInputSurface to obtain the input surface and pass the returned surface ID to the related camera API. - await videoRecorder.getInputSurface().then((surface) => { - console.info('getInputSurface success'); - surfaceID = surface; // The surfaceID is passed into createVideoOutput() of the camera as an input parameter. - }, this.failureCallback).catch(this.catchCallback); - - // Video recording depends on camera-related APIs. The following operations can be performed only after the video output start API is invoked. - // Start video recording. - await videoRecorder.start().then(() => { - console.info('start success'); - }, this.failureCallback).catch(this.catchCallback); - - // Pause video recording before the video output stop API of the camera is invoked. - await videoRecorder.pause().then(() => { - console.info('pause success'); - }, this.failureCallback).catch(this.catchCallback); - - // Resume video recording after the video output start API of the camera is invoked. - await videoRecorder.resume().then(() => { - console.info('resume success'); - }, this.failureCallback).catch(this.catchCallback); - - // Stop video recording after the video output stop API of the camera is invoked. - await videoRecorder.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Reset the recording configuration. 
- await videoRecorder.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off(). - videoRecorder.off('stateChange'); - videoRecorder.off('error'); - - // Release the video recording resources and camera object resources. - await videoRecorder.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the VideoRecorder instance to null. - videoRecorder = undefined; - surfaceID = undefined; - } -} -``` - -### AVRecorder App - -The AVRecorder app provides a complete audio and video recording process, which includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources. - -For details about the code, see [AVRecorderDemo]([multimedia_player_framework: Implementation of media playback and recording](https://gitee.com/openharmony/multimedia_player_framework/tree/master/test/appdemo/AVRecorderDemo)). diff --git a/en/application-dev/media/avsession-guidelines.md b/en/application-dev/media/avsession-guidelines.md deleted file mode 100644 index 3d1ac479f0f358c42778e60a0d4b47edafe0a0cd..0000000000000000000000000000000000000000 --- a/en/application-dev/media/avsession-guidelines.md +++ /dev/null @@ -1,633 +0,0 @@ -# AVSession Development - -> **NOTE** -> -> All APIs of the **AVSession** module are system APIs and can be called only by system applications. - -## Development for the Session Access End - -### Basic Concepts -- **AVMetadata**: media data related attributes, including the IDs of the current media asset, previous media asset, and next media asset, title, author, album, writer, and duration. 
-- **AVSessionDescriptor**: descriptor about a media session, including the session ID, session type (audio/video), custom session name (**sessionTag**), and information about the corresponding application (**elementName**). -- **AVPlaybackState**: information related to the media playback state, including the playback state, position, speed, buffered time, loop mode, and whether the media asset is favorited (**isFavorite**). - -### Available APIs -The table below lists the APIs available for the development of the session access end. The APIs use either a callback or promise to return the result. The APIs listed below use a callback, which provide the same functions as their counterparts that use a promise. For details, see [AVSession Management](../reference/apis/js-apis-avsession.md). - -Table 1 Common APIs for session access end development - -| API | Description | -|----------------------------------------------------------------------------------|-------------| -| createAVSession(context: Context, tag: string, type: AVSessionType, callback: AsyncCallback\): void | Creates a session.| -| setAVMetadata(data: AVMetadata, callback: AsyncCallback\): void | Sets session metadata. | -| setAVPlaybackState(state: AVPlaybackState, callback: AsyncCallback\): void | Sets the playback state information. | -| setLaunchAbility(ability: WantAgent, callback: AsyncCallback\): void | Sets the launcher ability.| -| getController(callback: AsyncCallback\): void | Obtains the controller of this session.| -| getOutputDevice(callback: AsyncCallback\): void | Obtains the output device information. | -| activate(callback: AsyncCallback\): void | Activates this session. | -| destroy(callback: AsyncCallback\): void | Destroys this session. | - -### How to Develop -1. Import the modules. - -```js -import avSession from '@ohos.multimedia.avsession'; -import wantAgent from '@ohos.app.ability.wantAgent'; -import featureAbility from '@ohos.ability.featureAbility'; -``` - -2. 
Create and activate a session. -```js -// Define global variables. -let mediaFavorite = false; -let currentSession = null; -let context = featureAbility.getContext(); - -// Create an audio session. -avSession.createAVSession(context, "AudioAppSample", 'audio').then((session) => { - currentSession = session; - currentSession.activate(); // Activate the session. -}).catch((err) => { - console.info(`createAVSession : ERROR : ${err.message}`); -}); -``` - -3. Set the session information, including: -- Session metadata. In addition to the current media asset ID (mandatory), you can set the title, album, author, duration, and previous/next media asset ID. For details about the session metadata, see **AVMetadata** in the API document. -- Launcher ability, which is implemented by calling an API of [WantAgent](../reference/apis/js-apis-app-ability-wantAgent.md). Generally, **WantAgent** is used to encapsulate want information. -- Playback state information. -```js -// Set the session metadata. -let metadata = { - assetId: "121278", - title: "lose yourself", - artist: "Eminem", - author: "ST", - album: "Slim shady", - writer: "ST", - composer: "ST", - duration: 2222, - mediaImage: "https://www.example.com/example.jpg", // Set it based on your project requirements. - subtitle: "8 Mile", - description: "Rap", - lyric: "https://www.example.com/example.lrc", // Set it based on your project requirements. - previousAssetId: "121277", - nextAssetId: "121279", -}; -currentSession.setAVMetadata(metadata).then(() => { - console.info('setAVMetadata successfully'); -}).catch((err) => { - console.info(`setAVMetadata : ERROR : ${err.message}`); -}); -``` - -```js -// Set the launcher ability. 
-let wantAgentInfo = { - wants: [ - { - bundleName: "com.neu.setResultOnAbilityResultTest1", - abilityName: "com.example.test.EntryAbility", - } - ], - operationType: wantAgent.OperationType.START_ABILITIES, - requestCode: 0, - wantAgentFlags:[wantAgent.WantAgentFlags.UPDATE_PRESENT_FLAG] -} - -wantAgent.getWantAgent(wantAgentInfo).then((agent) => { - currentSession.setLaunchAbility(agent).then(() => { - console.info('setLaunchAbility successfully'); - }).catch((err) => { - console.info(`setLaunchAbility : ERROR : ${err.message}`); - }); -}); -``` - -```js -// Set the playback state information. -let PlaybackState = { - state: avSession.PlaybackState.PLAYBACK_STATE_STOP, - speed: 1.0, - position:{elapsedTime: 0, updateTime: (new Date()).getTime()}, - bufferedTime: 1000, - loopMode: avSession.LoopMode.LOOP_MODE_SEQUENCE, - isFavorite: false, -}; -currentSession.setAVPlaybackState(PlaybackState).then(() => { - console.info('setAVPlaybackState successfully'); -}).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); -}); -``` - -```js -// Obtain the controller of this session. -currentSession.getController().then((selfController) => { - console.info('getController successfully'); -}).catch((err) => { - console.info(`getController : ERROR : ${err.message}`); -}); -``` - -```js -// Obtain the output device information. -currentSession.getOutputDevice().then((outputInfo) => { - console.info(`getOutputDevice successfully, deviceName : ${outputInfo.deviceName}`); -}).catch((err) => { - console.info(`getOutputDevice : ERROR : ${err.message}`); -}); -``` - -4. Subscribe to control command events. -```js -// Subscribe to the 'play' command event. -currentSession.on('play', () => { - console.log ("Call AudioPlayer.play."); - // Set the playback state information. 
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PLAY}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - - -// Subscribe to the 'pause' command event. -currentSession.on('pause', () => { - console.log ("Call AudioPlayer.pause."); - // Set the playback state information. - currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PAUSE}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'stop' command event. -currentSession.on('stop', () => { - console.log ("Call AudioPlayer.stop."); - // Set the playback state information. - currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_STOP}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'playNext' command event. -currentSession.on('playNext', () => { - // When the media file is not ready, download and cache the media file, and set the 'PREPARE' state. - currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PREPARE}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); - // The media file is obtained. - currentSession.setAVMetadata({assetId: '58970105', title: 'See you tomorrow'}).then(() => { - console.info('setAVMetadata successfully'); - }).catch((err) => { - console.info(`setAVMetadata : ERROR : ${err.message}`); - }); - console.log ("Call AudioPlayer.play."); - // Set the playback state information. 
- let time = (new Date()).getTime(); - currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PLAY, position: {elapsedTime: 0, updateTime: time}, bufferedTime:2000}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'fastForward' command event. -currentSession.on('fastForward', () => { - console.log("Call AudioPlayer for fast forwarding."); - // Set the playback state information. - currentSession.setAVPlaybackState({speed: 2.0}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'seek' command event. -currentSession.on('seek', (time) => { - console.log("Call AudioPlayer.seek."); - // Set the playback state information. - currentSession.setAVPlaybackState({position: {elapsedTime: time, updateTime: (new Data()).getTime()}}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'setSpeed' command event. -currentSession.on('setSpeed', (speed) => { - console.log(`Call AudioPlayer to set the speed to ${speed}`); - // Set the playback state information. - currentSession.setAVPlaybackState({speed: speed}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'setLoopMode' command event. -currentSession.on('setLoopMode', (mode) => { - console.log(`The application switches to the loop mode ${mode}`); - // Set the playback state information. 
- currentSession.setAVPlaybackState({loopMode: mode}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); -}); - -// Subscribe to the 'toggleFavorite' command event. -currentSession.on('toggleFavorite', (assetId) => { - console.log(`The application favorites ${assetId}.`); - // Perform the switch based on the last status. - let favorite = mediaFavorite == false ? true : false; - currentSession.setAVPlaybackState({isFavorite: favorite}).then(() => { - console.info('setAVPlaybackState successfully'); - }).catch((err) => { - console.info(`setAVPlaybackState : ERROR : ${err.message}`); - }); - mediaFavorite = favorite; -}); - -// Subscribe to the key event. -currentSession.on('handleKeyEvent', (event) => { - console.log(`User presses the key ${event.keyCode}`); -}); - -// Subscribe to output device changes. -currentSession.on('outputDeviceChange', (device) => { - console.log(`Output device changed to ${device.deviceName}`); -}); -``` - -5. Release resources. -```js -// Unsubscribe from the events. -currentSession.off('play'); -currentSession.off('pause'); -currentSession.off('stop'); -currentSession.off('playNext'); -currentSession.off('playPrevious'); -currentSession.off('fastForward'); -currentSession.off('rewind'); -currentSession.off('seek'); -currentSession.off('setSpeed'); -currentSession.off('setLoopMode'); -currentSession.off('toggleFavorite'); -currentSession.off('handleKeyEvent'); -currentSession.off('outputDeviceChange'); - -// Deactivate the session and destroy the object. -currentSession.deactivate().then(() => { - currentSession.destroy(); -}); -``` - -### Verification -Touch the play, pause, or next button on the media application. Check whether the media playback state changes accordingly. - -### FAQs - -1. Session Service Exception -- Symptoms - - The session service is abnormal, and the application cannot obtain a response from the session service. 
For example, the session service is not running or the communication with the session service fails. The error message "Session service exception" is displayed. - -- Possible causes - - The session service is killed during session restart. - -- Solution - - (1) The system retries the operation automatically. If the error persists for 3 seconds or more, stop the operation on the session or controller. - - (2) Destroy the current session or session controller and re-create it. If the re-creation fails, stop the operation on the session. - -2. Session Does Not Exist -- Symptoms - - Parameters are set for or commands are sent to the session that does not exist. The error message "The session does not exist" is displayed. - -- Possible causes - - The session has been destroyed, and no session record exists on the server. - -- Solution - - (1) If the error occurs on the application, re-create the session. If the error occurs on Media Controller, stop sending query or control commands to the session. - - (2) If the error occurs on the session service, query the current session record and pass the correct session ID when creating the controller. - -3. Session Not Activated -- Symptoms - - A control command or event is sent to the session when it is not activated. The error message "The session not active" is displayed. - -- Possible causes - - The session is in the inactive state. - -- Solution - - Stop sending the command or event. Subscribe to the session activation status, and resume the sending when the session is activated. - -## Development for the Session Control End (Media Controller) - -### Basic Concepts -- Remote projection: A local media session is projected to a remote device. The local controller sends commands to control media playback on the remote device. -- Sending key events: The controller controls media playback by sending key events. -- Sending control commands: The controller controls media playback by sending control commands. 
-- Sending system key events: A system application calls APIs to send system key events to control media playback. -- Sending system control commands: A system application calls APIs to send system control commands to control media playback. - -### Available APIs - -The table below lists the APIs available for the development of the session control end. The APIs use either a callback or promise to return the result. The APIs listed below use a callback, which provide the same functions as their counterparts that use a promise. For details, see [AVSession Management](../reference/apis/js-apis-avsession.md). - -Table 2 Common APIs for session control end development - -| API | Description | -| ------------------------------------------------------------------------------------------------ | ----------------- | -| getAllSessionDescriptors(callback: AsyncCallback\>>): void | Obtains the descriptors of all sessions. | -| createController(sessionId: string, callback: AsyncCallback\): void | Creates a controller. | -| sendAVKeyEvent(event: KeyEvent, callback: AsyncCallback\): void | Sends a key event. | -| getLaunchAbility(callback: AsyncCallback\): void | Obtains the launcher ability. | -| sendControlCommand(command: AVControlCommand, callback: AsyncCallback\): void | Sends a control command. | -| sendSystemAVKeyEvent(event: KeyEvent, callback: AsyncCallback\): void | Send a system key event. | -| sendSystemControlCommand(command: AVControlCommand, callback: AsyncCallback\): void | Sends a system control command. | -| castAudio(session: SessionToken \| 'all', audioDevices: Array\, callback: AsyncCallback\): void | Casts the media session to a remote device.| - -### How to Develop -1. Import the modules. -```js -import avSession from '@ohos.multimedia.avsession'; -import {Action, KeyEvent} from '@ohos.multimodalInput.KeyEvent'; -import wantAgent from '@ohos.app.ability.wantAgent'; -import audio from '@ohos.multimedia.audio'; -``` - -2. 
Obtain the session descriptors and create a controller. -```js -// Define global variables. -let g_controller = new Array(); -let g_centerSupportCmd:Set = new Set(['play', 'pause', 'playNext', 'playPrevious', 'fastForward', 'rewind', 'seek','setSpeed', 'setLoopMode', 'toggleFavorite']); -let g_validCmd:Set; - -// Obtain the session descriptors and create a controller. -avSession.getAllSessionDescriptors().then((descriptors) => { - descriptors.forEach((descriptor) => { - avSession.createController(descriptor.sessionId).then((controller) => { - g_controller.push(controller); - }).catch((err) => { - console.error('createController error'); - }); - }); -}).catch((err) => { - console.error('getAllSessionDescriptors error'); -}); - -// Subscribe to the 'sessionCreate' event and create a controller. -avSession.on('sessionCreate', (session) => { - // After a session is added, you must create a controller. - avSession.createController(session.sessionId).then((controller) => { - g_controller.push(controller); - }).catch((err) => { - console.info(`createController : ERROR : ${err.message}`); - }); -}); -``` - -3. Subscribe to the session state and service changes. -```js -// Subscribe to the 'activeStateChange' event. -controller.on('activeStateChange', (isActive) => { - if (isActive) { - console.log ("The widget corresponding to the controller is highlighted."); - } else { - console.log("The widget corresponding to the controller is invalid."); - } -}); - -// Subscribe to the 'sessionDestroy' event to enable Media Controller to get notified when the session dies. -controller.on('sessionDestroy', () => { - console.info('on sessionDestroy : SUCCESS '); - controller.destroy().then(() => { - console.info('destroy : SUCCESS '); - }).catch((err) => { - console.info(`destroy : ERROR :${err.message}`); - }); -}); - -// Subscribe to the 'sessionDestroy' event to enable the application to get notified when the session dies. 
-avSession.on('sessionDestroy', (session) => { - let index = g_controller.findIndex((controller) => { - return controller.sessionId == session.sessionId; - }); - if (index != 0) { - g_controller[index].destroy(); - g_controller.splice(index, 1); - } -}); - -// Subscribe to the 'topSessionChange' event. -avSession.on('topSessionChange', (session) => { - let index = g_controller.findIndex((controller) => { - return controller.sessionId == session.sessionId; - }); - // Place the session on the top. - if (index != 0) { - g_controller.sort((a, b) => { - return a.sessionId == session.sessionId ? -1 : 0; - }); - } -}); - -// Subscribe to the 'sessionServiceDie' event. -avSession.on('sessionServiceDie', () => { - // The server is abnormal, and the application clears resources. - console.log("Server exception"); -}) -``` - -4. Subscribe to media session information changes. -```js -// Subscribe to metadata changes. -let metaFilter = ['assetId', 'title', 'description']; -controller.on('metadataChange', metaFilter, (metadata) => { - console.info(`on metadataChange assetId : ${metadata.assetId}`); -}); - -// Subscribe to playback state changes. -let playbackFilter = ['state', 'speed', 'loopMode']; -controller.on('playbackStateChange', playbackFilter, (playbackState) => { - console.info(`on playbackStateChange state : ${playbackState.state}`); -}); - -// Subscribe to supported command changes. -controller.on('validCommandChange', (cmds) => { - console.info(`validCommandChange : SUCCESS : size : ${cmds.size}`); - console.info(`validCommandChange : SUCCESS : cmds : ${cmds.values()}`); - g_validCmd.clear(); - for (let c of g_centerSupportCmd) { - if (cmds.has(c)) { - g_validCmd.add(c); - } - } -}); - -// Subscribe to output device changes. -controller.on('outputDeviceChange', (device) => { - console.info(`on outputDeviceChange device isRemote : ${device.isRemote}`); -}); -``` - -5. Control the session behavior. 
-```js -// When the user touches the play button, the control command 'play' is sent to the session. -if (g_validCmd.has('play')) { - controller.sendControlCommand({command:'play'}).then(() => { - console.info('sendControlCommand successfully'); - }).catch((err) => { - console.info(`sendControlCommand : ERROR : ${err.message}`); - }); -} - -// When the user selects the single loop mode, the corresponding control command is sent to the session. -if (g_validCmd.has('setLoopMode')) { - controller.sendControlCommand({command: 'setLoopMode', parameter: avSession.LoopMode.LOOP_MODE_SINGLE}).then(() => { - console.info('sendControlCommand successfully'); - }).catch((err) => { - console.info(`sendControlCommand : ERROR : ${err.message}`); - }); -} - -// Send a key event. -let keyItem = {code: 0x49, pressedTime: 123456789, deviceId: 0}; -let event = {action: 2, key: keyItem, keys: [keyItem]}; -controller.sendAVKeyEvent(event).then(() => { - console.info('sendAVKeyEvent Successfully'); -}).catch((err) => { - console.info(`sendAVKeyEvent : ERROR : ${err.message}`); -}); - -// The user touches the blank area on the widget to start the application. -controller.getLaunchAbility().then((want) => { - console.log("Starting the application in the foreground"); -}).catch((err) => { - console.info(`getLaunchAbility : ERROR : ${err.message}`); -}); - -// Send the system key event. -let keyItem = {code: 0x49, pressedTime: 123456789, deviceId: 0}; -let event = {action: 2, key: keyItem, keys: [keyItem]}; -avSession.sendSystemAVKeyEvent(event).then(() => { - console.info('sendSystemAVKeyEvent Successfully'); -}).catch((err) => { - console.info(`sendSystemAVKeyEvent : ERROR : ${err.message}`); -}); - -// Send a system control command to the top session. 
-let avcommand = {command: 'toggleFavorite', parameter: "false"}; -avSession.sendSystemControlCommand(avcommand).then(() => { - console.info('sendSystemControlCommand successfully'); -}).catch((err) => { - console.info(`sendSystemControlCommand : ERROR : ${err.message}`); -}); - -// Cast the session to another device. -let audioManager = audio.getAudioManager(); -let audioDevices; -await audioManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => { - audioDevices = data; - console.info('Promise returned to indicate that the device list is obtained.'); -}).catch((err) => { - console.info(`getDevices : ERROR : ${err.message}`); -}); - -avSession.castAudio('all', audioDevices).then(() => { - console.info('createController : SUCCESS'); -}).catch((err) => { - console.info(`createController : ERROR : ${err.message}`); -}); -``` - -6. Release resources. -```js -// Unsubscribe from the events. - controller.off('metadataChange'); - controller.off('playbackStateChange'); - controller.off('sessionDestroy'); - controller.off('activeStateChange'); - controller.off('validCommandChange'); - controller.off('outputDeviceChange'); - - // Destroy the controller. - controller.destroy().then(() => { - console.info('destroy : SUCCESS '); - }).catch((err) => { - console.info(`destroy : ERROR : ${err.message}`); - }); -``` - -### Verification -When you touch the play, pause, or next button in Media Controller, the playback state of the application changes accordingly. - -### FAQs -1. Controller Does Not Exist -- Symptoms - - A control command or an event is sent to the controller that does not exist. The error message "The session controller does not exist" is displayed. - -- Possible causes - - The controller has been destroyed. - -- Solution - - Query the session record and create the corresponding controller. - -2. Remote Session Connection Failure -- Symptoms - - The communication between the local session and the remote session fails. 
The error information "The remote session connection failed" is displayed. - -- Possible causes - - The communication between devices is interrupted. - -- Solution - - Stop sending control commands to the session. Subscribe to output device changes, and resume the sending when the output device is changed. - -3. Invalid Session Command -- Symptoms - - The control command or event sent to the session is not supported. The error message "Invalid session command" is displayed. - -- Possible causes - - The session does not support this command. - -- Solution - - Stop sending the command or event. Query the commands supported by the session, and send a command supported. - -4. Too Many Commands or Events -- Symptoms - - The session client sends too many messages or commands to the server in a period of time, causing the server to be overloaded. The error message "Command or event overload" is displayed. - -- Possible causes - - The server is overloaded with messages or events. - -- Solution - - Control the frequency of sending commands or events. diff --git a/en/application-dev/media/avsession-overview.md b/en/application-dev/media/avsession-overview.md index c46211765644330ac26c1154f181904c2db4c3d0..766e642eebc2ba861bf6aceca5f9ea702f99d74f 100644 --- a/en/application-dev/media/avsession-overview.md +++ b/en/application-dev/media/avsession-overview.md @@ -1,56 +1,50 @@ # AVSession Overview -> **NOTE** -> -> All APIs of the **AVSession** module are system APIs and can be called only by system applications. +The Audio and Video Session (AVSession) service is used to manage the playback behavior of all audio and video applications in the system in a unified manner. For example, it allows only one audio application in the playing state. -## Overview +Audio and video applications access the AVSession service and send application data (for example, a song that is being played and playback state) to it. 
Through a controller, the user can choose another application or device to continue the playback. If an application does not access the AVSession service, its playback will be forcibly interrupted when it switches to the background. - AVSession, short for audio and video session, is also known as media session. - - Application developers can use the APIs provided by the **AVSession** module to connect their audio and video applications to the system's Media Controller. - - System developers can use the APIs provided by the **AVSession** module to display media information of system audio and video applications and carry out unified playback control. +To implement background playback, you must request a continuous task to prevent the task from being suspended. For details, see [Continuous Task Development](../task-management/continuous-task-dev-guide.md). - You can implement the following features through the **AVSession** module: +## Basic Concepts - 1. Unified playback control entry +Be familiar with the following basic concepts before development: - If there are multiple audio and video applications on the device, users need to switch to and access different applications to control media playback. With AVSession, a unified playback control entry of the system (such as Media Controller) is used for playback control of these audio and video applications. No more switching is required. +- AVSession - 2. Better background application management + For AVSession, one end is the audio and video applications under control, and the other end is a controller (for example, Media Controller or AI Voice). AVSession provides a channel for information exchange between the application and controller. - When an application running in the background automatically starts audio playback, it is difficult for users to locate the application. With AVSession, users can quickly find the application that plays the audio clip in Media Controller. 
+- Provider -## Basic Concepts + An audio and video application that accesses the AVSession service. After accessing AVSession, the audio and video application must provide the media information, for example, the name of the item to play and the playback state, to AVSession. Through AVSession, the application also receives control commands from the controller and responds accordingly. -- AVSession +- Controller + + A system application that accesses AVSession to provide global control on audio and video playback behavior. Typical controllers on OpenHarmony devices are Media Controller and AI Voice. The following sections use Media Controller as an example of the controller. After accessing AVSession, the controller obtains the latest media information and sends control commands to the audio and video applications through AVSession. - A channel used for information exchange between applications and Media Controller. For AVSession, one end is the media application under control, and the other end is Media Controller. Through AVSession, an application can transfer the media playback information to Media Controller and receive control commands from Media Controller. - - AVSessionController - Object that controls media sessions and thereby controls the playback behavior of applications. Through AVSessionController, Media Controller can control the playback behavior of applications, obtain playback information, and send control commands. It can also monitor the playback state of applications to ensure synchronization of the media session information. + An object that controls the playback behavior of the provider. It obtains the playback information of the audio and video application and listens for the application playback changes to synchronize the AVSession information between the application and controller. The controller is the holder of an **AVSessionController** object. + +- AVSessionManager + + An object that provides the capability of managing sessions. 
It can create an **AVSession** object, create an **AVSessionController** object, send control commands, and listen for session state changes. + -- Media Controller - - Holder of AVSessionController. Through AVSessionController, Media Controller sends commands to control media playback of applications. +## AVSession Interaction Process -## Implementation Principle +AVSessions are classified into local AVSessions and distributed AVSessions. -The **AVSession** module provides two classes: **AVSession** and **AVSessionController**. +![AVSession Interaction Process](figures/avsession-interaction-process.png) -**Figure 1** AVSession interaction +- Local AVSession -![en-us_image_avsession](figures/en-us_image_avsession.png) + Local AVSession establishes a connection between the provider and controller in the local device, so as to implement unified playback control and media information display for audio and video applications in the system. -- Interaction between the application and Media Controller: First, an audio application creates an **AVSession** object and sets session information, including media metadata, launcher ability, and playback state information. Then, Media Controller creates an **AVSessionController** object to obtain session-related information and send the 'play' command to the audio application. Finally, the audio application responds to the command and updates the playback state. +- Distributed AVSession -- Distributed projection: When a connected device creates a local session, Media Controller or the audio application can select another device to be projected based on the device list, synchronize the local session to the remote device, and generate a controllable remote session. The remote session is controlled by sending control commands to the remote device's application through its AVSessionController. 
+ Distributed AVSession establishes a connection between the provider and controller in the cross-device scenario, so as to implement cross-device playback control and media information display for audio and video applications in the system. For example, you can project the content played on device A to device B and perform playback control on device B. ## Constraints -- The playback information displayed in Media Controller is the media information proactively written by the media application to AVSession. -- Media Controller controls the playback of a media application based on the responses of the media application to control commands. -- AVSession can transmit media playback information and control commands. It does not display information or execute control commands. -- Do not develop Media Controller for common applications. For common audio and video applications running on OpenHarmony, the default control end is Media Controller, which is a system application. You do not need to carry out additional development for Media Controller. -- If you want to develop your own system running OpenHarmony, you can develop your own Media Controller. -- For better background management of audio and video applications, the **AVSession** module enforces background control for applications. Only applications that have accessed AVSession can play audio in the background. Otherwise, the system forcibly pauses the playback when an application switches to the background. +The AVSession service manages the playback behavior of all audio and video applications in the system. To continue the playback after switching to the background, the audio and video applications must access the AVSession service. 
diff --git a/en/application-dev/media/camera-device-input.md b/en/application-dev/media/camera-device-input.md new file mode 100644 index 0000000000000000000000000000000000000000..3702e16760c002010c50da236d4ef9c2af079e5e --- /dev/null +++ b/en/application-dev/media/camera-device-input.md @@ -0,0 +1,82 @@ +# Device Input Management + +Before developing a camera application, you must create an independent camera object. The application invokes and controls the camera object to perform basic operations such as preview, photographing, and video recording. + +## How to Develop + +Read [Camera](../reference/apis/js-apis-camera.md) for the API reference. + +1. Import the camera module, which provides camera-related attributes and methods. + + ```ts + import camera from '@ohos.multimedia.camera'; + ``` + +2. Call **getCameraManager()** to obtain a **CameraManager** object. + + ```ts + let cameraManager; + let context: any = getContext(this); + cameraManager = camera.getCameraManager(context) + ``` + + > **NOTE** + > + > If obtaining the object fails, the camera hardware may be occupied or unusable. If it is occupied, wait until it is released. + +3. Call **getSupportedCameras()** in the **CameraManager** class to obtain the list of cameras supported by the current device. The list stores the IDs of all cameras supported. If the list is not empty, each ID in the list can be used to create an independent camera object. Otherwise, no camera is available for the current device and subsequent operations cannot be performed. + + ```ts + let cameraArray = cameraManager.getSupportedCameras(); + if (cameraArray.length <= 0) { + console.error("cameraManager.getSupportedCameras error"); + return; + } + + for (let index = 0; index < cameraArray.length; index++) { + console.info('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID. + console.info('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position. 
+ console.info('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type. + console.info('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type. + } + ``` + +4. Call **getSupportedOutputCapability()** to obtain all output streams supported by the current device, such as preview streams and photo streams. The output stream is in each **profile** field under **CameraOutputCapability**. + + ```ts + // Create a camera input stream. + let cameraInput; + try { + cameraInput = cameraManager.createCameraInput(cameraArray[0]); + } catch (error) { + console.error('Failed to createCameraInput errorCode = ' + error.code); + } + // Listen for CameraInput errors. + let cameraDevice = cameraArray[0]; + cameraInput.on('error', cameraDevice, (error) => { + console.info(`Camera input error code: ${error.code}`); + }) + // Open the camera. + await cameraInput.open(); + // Obtain the output stream capabilities supported by the camera. + let cameraOutputCapability = cameraManager.getSupportedOutputCapability(cameraArray[0]); + if (!cameraOutputCapability) { + console.error("cameraManager.getSupportedOutputCapability error"); + return; + } + console.info("outputCapability: " + JSON.stringify(cameraOutputCapability)); + ``` + + +## Status Listening + +During camera application development, you can listen for the camera status, including the appearance of a new camera, removal of a camera, and availability of a camera. The camera ID and camera status are used in the callback function. When a new camera appears, the new camera can be added to the supported camera list. + +Register the 'cameraStatus' event and return the listening result through a callback, which carries the **CameraStatusInfo** parameter. For details about the parameter, see [CameraStatusInfo](../reference/apis/js-apis-camera.md#camerastatusinfo). 
+ +```ts +cameraManager.on('cameraStatus', (cameraStatusInfo) => { + console.info(`camera: ${cameraStatusInfo.camera.cameraId}`); + console.info(`status: ${cameraStatusInfo.status}`); +}) +``` diff --git a/en/application-dev/media/camera-metadata.md b/en/application-dev/media/camera-metadata.md new file mode 100644 index 0000000000000000000000000000000000000000..8fdeff1df08f624374f2a2a5cee32b99b2c41e03 --- /dev/null +++ b/en/application-dev/media/camera-metadata.md @@ -0,0 +1,66 @@ +# Camera Metadata + +Metadata is the description and context of image information returned by the camera application. It provides detailed data for the image information, for example, coordinates of a viewfinder frame for identifying a portrait in a photo or a video. + +Metadata uses a tag (key) to find the corresponding data during the transfer of parameters and configurations, reducing memory copy operations. + +## How to Develop + +Read [Camera](../reference/apis/js-apis-camera.md) for the API reference. + +1. Obtain the metadata types supported by the current device from **supportedMetadataObjectTypes** in **CameraOutputCapability**, and then use **createMetadataOutput()** to create a metadata output stream. + + ```ts + let metadataObjectTypes = cameraOutputCapability.supportedMetadataObjectTypes; + let metadataOutput; + try { + metadataOutput = cameraManager.createMetadataOutput(metadataObjectTypes); + } catch (error) { + // If the operation fails, error.code is returned and processed. + console.info(error.code); + } + ``` + +2. Call **start()** to start outputting metadata. If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). + + ```ts + metadataOutput.start().then(() => { + console.info('Callback returned with metadataOutput started.'); + }).catch((err) => { + console.info('Failed to metadataOutput start '+ err.code); + }); + ``` + +3. Call **stop()** to stop outputting metadata. 
If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). + + ```ts + metadataOutput.stop().then(() => { + console.info('Callback returned with metadataOutput stopped.'); + }).catch((err) => { + console.info('Failed to metadataOutput stop '+ err.code); + }); + ``` + +## Status Listening + +During camera application development, you can listen for the status of metadata objects and output stream. + +- Register the 'metadataObjectsAvailable' event to listen for metadata objects that are available. When a valid metadata object is detected, the callback function returns the metadata. This event can be registered when a **MetadataOutput** object is created. + + ```ts + metadataOutput.on('metadataObjectsAvailable', (metadataObjectArr) => { + console.info(`metadata output metadataObjectsAvailable`); + }) + ``` + + > **NOTE** + > + > Currently, only **FACE_DETECTION** is available for the metadata type. The metadata object is the rectangle of the recognized face, including the x-axis coordinate and y-axis coordinate of the upper left corner of the rectangle as well as the width and height of the rectangle. + +- Register the 'error' event to listen for metadata stream errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). 
+ + ```ts + metadataOutput.on('error', (metadataOutputError) => { + console.info(`Metadata output error code: ${metadataOutputError.code}`); + }) + ``` diff --git a/en/application-dev/media/camera-overview.md b/en/application-dev/media/camera-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..03445ee6979c28fb4084a2f3c8186d77f14e5b89 --- /dev/null +++ b/en/application-dev/media/camera-overview.md @@ -0,0 +1,27 @@ +# Camera Overview + +With the APIs provided by the camera module of the multimedia subsystem, you can develop a camera application. The application accesses and operates the camera hardware to implement basic operations, such as preview, photographing, and video recording. It can also perform more operations, for example, controlling the flash and exposure time, and focusing or adjusting the focus. + +## Development Model + +The camera application invokes the camera hardware to collect and process image and video data, and output images and videos. It can be used when there are multiple lenses (such as wide-angle lens, long-focus lens, and ToF lens) in various service scenarios (such as different requirements on the resolution, format, and effect). + +The figure below illustrates the working process of the camera module. The working process can be summarized into three parts: input device management, session management, and output management. + +- During input device management, the camera application invokes the camera hardware to collect data and uses the data as an input stream. + +- During session management, you can configure an input stream to determine the camera to be used. You can also set parameters, such as the flash, exposure time, focus, and focus adjustment, to implement different shooting effects in various service scenarios. The application can switch between sessions to meet service requirements in different scenarios. 
+
+- During output management, you can configure an output stream, which can be a preview stream, photo stream, or video stream.
+
+**Figure 1** Camera working process
+![Camera Workflow](figures/camera-workflow.png)
+
+For better application development, you are also advised to understand the camera development model.
+
+**Figure 2** Camera development model
+![Camera Development Model](figures/camera-development-model.png)
+
+The camera application controls the camera hardware to implement basic operations such as image display (preview), photo saving (photographing), and video recording. During the implementation, the camera service controls the camera hardware to collect and output data, and transmits the data to a specific module for processing through a BufferQueue at the bottom camera device hardware interface (HDI) layer. You can ignore the BufferQueue during application development. It is used to send the data processed by the bottom layer to the upper layer for image display.
+
+For example, in a video recording scenario, the recording service creates a video surface and provides it to the camera service for data transmission. The camera service controls the camera device to collect video data and generate a video stream. After processing the collected data at the HDI layer, the camera service transmits the video stream to the recording service through the surface. The recording service processes the video stream and saves it as a video file. Now video recording is complete.
diff --git a/en/application-dev/media/camera-preparation.md b/en/application-dev/media/camera-preparation.md new file mode 100644 index 0000000000000000000000000000000000000000..eb504af9a69f65473f27de59a45a17891357be7f --- /dev/null +++ b/en/application-dev/media/camera-preparation.md @@ -0,0 +1,25 @@ +# Camera Development Preparations + +The main process of camera application development includes development preparations, device input management, session management, preview, photographing, and video recording. + +Before developing a camera application, you must request camera-related permissions (as described in the table below) to ensure that the application has the permission to access the camera hardware and other services. Before requesting the permission, ensure that the [basic principles for permission management](../security/accesstoken-overview.md#basic-principles-for-permission-management) are met. + + +| Permission| Description| Authorization Mode| +| -------- | -------- | -------- | +| ohos.permission.CAMERA | Allows an application to use the camera to take photos and record videos.| user_grant | +| ohos.permission.MICROPHONE | Allows an application to access the microphone.
This permission is required only if the application is used to record audio.| user_grant | +| ohos.permission.WRITE_MEDIA | Allows an application to read media files from and write media files into the user's external storage. This permission is optional.| user_grant | +| ohos.permission.READ_MEDIA | Allows an application to read media files from the user's external storage. This permission is optional.| user_grant | +| ohos.permission.MEDIA_LOCATION | Allows an application to access geographical locations in the user's media file. This permission is optional.| user_grant | + + +After configuring the permissions in the **module.json5** file, the application must call [abilityAccessCtrl.requestPermissionsFromUser](../reference/apis/js-apis-abilityAccessCtrl.md#requestpermissionsfromuser9) to check whether the required permissions are granted. If not, request the permissions from the user by displaying a dialog box. + + +For details about how to request and verify the permissions, see [Permission Application Guide](../security/accesstoken-guidelines.md). + + +> **NOTE** +> +> Even if the user has granted a permission, the application must check for the permission before calling an API protected by the permission. It should not persist the permission granted status, because the user can revoke the permission through the system application **Settings**. diff --git a/en/application-dev/media/camera-preview.md b/en/application-dev/media/camera-preview.md new file mode 100644 index 0000000000000000000000000000000000000000..e65f5dac8c96737b81b20703ce6ffa6fe7daa54b --- /dev/null +++ b/en/application-dev/media/camera-preview.md @@ -0,0 +1,87 @@ +# Camera Preview + +Preview is the image you see after you start the camera application but before you take photos or record videos. + +## How to Develop + +Read [Camera](../reference/apis/js-apis-camera.md) for the API reference. + +1. Create a surface. 
+ + The XComponent, the capabilities of which are provided by the UI, offers the surface for preview streams. For details, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md). + + ```ts + // Create an XComponentController object. + mXComponentController: XComponentController = new XComponentController; + build() { + Flex() { + // Create an XComponent. + XComponent({ + id: '', + type: 'surface', + libraryname: '', + controller: this.mXComponentController + }) + .onLoad(() => { + // Set the surface width and height (1920 x 1080). For details about how to set the preview size, see the preview resolutions supported by the current device, which are obtained from previewProfilesArray. + this.mXComponentController.setXComponentSurfaceSize({surfaceWidth:1920,surfaceHeight:1080}); + // Obtain the surface ID. + globalThis.surfaceId = this.mXComponentController.getXComponentSurfaceId(); + }) + .width('1920px') + .height('1080px') + } + } + ``` + +2. Call **previewProfiles()** in the **CameraOutputCapability** class to obtain the preview capabilities, in the format of an **previewProfilesArray** array, supported by the current device. Then call **createPreviewOutput()** to create a preview output stream, with the first parameter set to the first item in the **previewProfilesArray** array and the second parameter set to the surface ID obtained in step 1. + + ```ts + let previewProfilesArray = cameraOutputCapability.previewProfiles; + let previewOutput; + try { + previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId); + } + catch (error) { + console.error("Failed to create the PreviewOutput instance." + error); + } + ``` + +3. Call **start()** to start outputting the preview stream. If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). 
+ + ```ts + previewOutput.start().then(() => { + console.info('Callback returned with previewOutput started.'); + }).catch((err) => { + console.info('Failed to previewOutput start '+ err.code); + }); + ``` + + +## Status Listening + +During camera application development, you can listen for the preview output stream status, including preview stream start, preview stream end, and preview stream output errors. + +- Register the 'frameStart' event to listen for preview start events This event can be registered when a **PreviewOutput** object is created and is triggered when the bottom layer starts exposure for the first time. The preview stream is started as long as a result is returned. + + ```ts + previewOutput.on('frameStart', () => { + console.info('Preview frame started'); + }) + ``` + +- Register the 'frameEnd' event to listen for preview end events. This event can be registered when a **PreviewOutput** object is created and is triggered when the last frame of preview ends. The preview stream ends as long as a result is returned. + + ```ts + previewOutput.on('frameEnd', () => { + console.info('Preview frame ended'); + }) + ``` + +- Register the 'error' event to listen for preview output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). 
+ + ```ts + previewOutput.on('error', (previewOutputError) => { + console.info(`Preview output error code: ${previewOutputError.code}`); + }) + ``` diff --git a/en/application-dev/media/camera-recording-case.md b/en/application-dev/media/camera-recording-case.md new file mode 100644 index 0000000000000000000000000000000000000000..7aedbf5688812c47542ee627329b137325f17bbc --- /dev/null +++ b/en/application-dev/media/camera-recording-case.md @@ -0,0 +1,247 @@ +# Camera Recording Sample + +## Development Process + +After obtaining the output stream capabilities supported by the camera, create a video stream. The development process is as follows: + +![Recording Development Process](figures/recording-development-process.png) + + +## Sample Code + +```ts +import camera from '@ohos.multimedia.camera' +import media from '@ohos.multimedia.media' + +// Create a CameraManager instance. +context: any = getContext(this) +let cameraManager = camera.getCameraManager(this.context) +if (!cameraManager) { + console.error("camera.getCameraManager error") + return; +} + +// Listen for camera status changes. +cameraManager.on('cameraStatus', (cameraStatusInfo) => { + console.log(`camera : ${cameraStatusInfo.camera.cameraId}`); + console.log(`status: ${cameraStatusInfo.status}`); +}) + +// Obtain the output stream capabilities supported by the camera. 
+let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]); +if (!cameraOutputCap) { + console.error("cameraManager.getSupportedOutputCapability error") + return; +} +console.log("outputCapability: " + JSON.stringify(cameraOutputCap)); + +let previewProfilesArray = cameraOutputCap.previewProfiles; +if (!previewProfilesArray) { + console.error("createOutput previewProfilesArray == null || undefined") +} + +let photoProfilesArray = cameraOutputCap.photoProfiles; +if (!photoProfilesArray) { + console.error("createOutput photoProfilesArray == null || undefined") +} + +let videoProfilesArray = cameraOutputCap.videoProfiles; +if (!videoProfilesArray) { + console.error("createOutput videoProfilesArray == null || undefined") +} + +let metadataObjectTypesArray = cameraOutputCap.supportedMetadataObjectTypes; +if (!metadataObjectTypesArray) { + console.error("createOutput metadataObjectTypesArray == null || undefined") +} + +// Configure the parameters based on those supported by the hardware device. +let AVRecorderProfile = { + audioBitrate : 48000, + audioChannels : 2, + audioCodec : media.CodecMimeType.AUDIO_AAC, + audioSampleRate : 48000, + fileFormat : media.ContainerFormatType.CFT_MPEG_4, + videoBitrate : 2000000, + videoCodec : media.CodecMimeType.VIDEO_MPEG4, + videoFrameWidth : 640, + videoFrameHeight : 480, + videoFrameRate : 30 +} +let AVRecorderConfig = { + audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, + videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, + profile : AVRecorderProfile, + url : 'fd://', // Before passing in a file descriptor to this parameter, the file must be created by the caller and granted with the read and write permissions. Example value: eg.fd://45--file:///data/media/01.mp4. + rotation: 0, // The value can be 0, 90, 180, or 270. If any other value is used, prepare() reports an error. 
+ location : { latitude : 30, longitude : 130 } +} + +let avRecorder +media.createAVRecorder((error, recorder) => { + if (recorder != null) { + avRecorder = recorder; + console.log('createAVRecorder success'); + } else { + console.log(`createAVRecorder fail, error:${error}`); + } +}); + +avRecorder.prepare(AVRecorderConfig, (err) => { + if (err == null) { + console.log('prepare success'); + } else { + console.log('prepare failed and error is ' + err.message); + } +}) + +let videoSurfaceId = null; // The surfaceID is passed in to the camera API to create a VideoOutput instance. +avRecorder.getInputSurface((err, surfaceId) => { + if (err == null) { + console.log('getInputSurface success'); + videoSurfaceId = surfaceId; + } else { + console.log('getInputSurface failed and error is ' + err.message); + } +}); + +// Create a VideoOutput instance. +let videoOutput +try { + videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId) +} catch (error) { + console.error('Failed to create the videoOutput instance. errorCode = ' + error.code); +} + +// Listen for video output errors. +videoOutput.on('error', (error) => { + console.log(`Preview output error code: ${error.code}`); +}) + +// Create a session. +let captureSession +try { + captureSession = cameraManager.createCaptureSession() +} catch (error) { + console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code); +} + +// Listen for session errors. +captureSession.on('error', (error) => { + console.log(`Capture session error code: ${error.code}`); +}) + +// Start configuration for the session. +try { + captureSession.beginConfig() +} catch (error) { + console.error('Failed to beginConfig. errorCode = ' + error.code); +} + +// Obtain the camera list. +let cameraArray = cameraManager.getSupportedCameras(); +if (cameraArray.length <= 0) { + console.error("cameraManager.getSupportedCameras error") + return; +} + +// Create a camera input stream. 
+let cameraInput +try { + cameraInput = cameraManager.createCameraInput(cameraArray[0]); +} catch (error) { + console.error('Failed to createCameraInput errorCode = ' + error.code); +} + +// Listen for camera input errors. +let cameraDevice = cameraArray[0]; +cameraInput.on('error', cameraDevice, (error) => { + console.log(`Camera input error code: ${error.code}`); +}) + +// Open the camera. +await cameraInput.open(); + +// Add the camera input stream to the session. +try { + captureSession.addInput(cameraInput) +} catch (error) { + console.error('Failed to addInput. errorCode = ' + error.code); +} + +// Create a preview output stream. For details about the surfaceId parameter, see the XComponent. The preview stream is the surface provided by the XComponent. +let previewOutput +try { + previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId) +} catch (error) { + console.error("Failed to create the PreviewOutput instance.") +} + +// Add the preview input stream to the session. +try { + captureSession.addOutput(previewOutput) +} catch (error) { + console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code); +} + +// Add a video output stream to the session. +try { + captureSession.addOutput(videoOutput) +} catch (error) { + console.error('Failed to addOutput(videoOutput). errorCode = ' + error.code); +} + +// Commit the session configuration. +await captureSession.commitConfig() + +// Start the session. +await captureSession.start().then(() => { + console.log('Promise returned to indicate the session start success.'); +}) + +// Start the video output stream. +videoOutput.start(async (err) => { + if (err) { + console.error('Failed to start the video output ${err.message}'); + return; + } + console.log('Callback invoked to indicate the video output start success.'); +}); + +// Start video recording. +avRecorder.start().then(() => { + console.log('videoRecorder start success'); +}) + +// Stop the video output stream. 
+videoOutput.stop((err) => { + if (err) { + console.error('Failed to stop the video output ${err.message}'); + return; + } + console.log('Callback invoked to indicate the video output stop success.'); +}); + +// Stop video recording. +avRecorder.stop().then(() => { + console.log('stop success'); +}) + +// Stop the session. +captureSession.stop() + +// Release the camera input stream. +cameraInput.close() + +// Release the preview output stream. +previewOutput.release() + +// Release the video output stream. +videoOutput.release() + +// Release the session. +captureSession.release() + +// Set the session to null. +captureSession = null +``` diff --git a/en/application-dev/media/camera-recording.md b/en/application-dev/media/camera-recording.md new file mode 100644 index 0000000000000000000000000000000000000000..208b0664204ef2f74bb1160702053bde61fdf316 --- /dev/null +++ b/en/application-dev/media/camera-recording.md @@ -0,0 +1,155 @@ +# Camera Recording + +Video recording is also an important function of the camera application. Video recording is the process of cyclic capturing of frames. To smooth videos, you can follow step 4 in [Camera Photographing](camera-shooting.md) to set the resolution, flash, focal length, photo quality, and rotation angle. + +## How to Develop + +Read [Camera](../reference/apis/js-apis-camera.md) for the API reference. + +1. Import the media module. The [APIs](../reference/apis/js-apis-media.md) provided by this module are used to obtain the surface ID and create a photo output stream. + + ```ts + import media from '@ohos.multimedia.media'; + ``` + +2. Create a surface. + + Call **createAVRecorder()** of the media module to create an **AVRecorder** instance, and call **getInputSurface()** of the instance to obtain the surface ID, which is associated with the view output stream to process the data output by the stream. 
+ + ```ts + let AVRecorder; + media.createAVRecorder((error, recorder) => { + if (recorder != null) { + AVRecorder = recorder; + console.info('createAVRecorder success'); + } else { + console.info(`createAVRecorder fail, error:${error}`); + } + }); + // For details about AVRecorderConfig, see the next section. + AVRecorder.prepare(AVRecorderConfig, (err) => { + if (err == null) { + console.log('prepare success'); + } else { + console.log('prepare failed and error is ' + err.message); + } + }) + + let videoSurfaceId = null; + AVRecorder.getInputSurface().then((surfaceId) => { + console.info('getInputSurface success'); + videoSurfaceId = surfaceId; + }).catch((err) => { + console.info('getInputSurface failed and catch error is ' + err.message); + }); + ``` + +3. Create a video output stream. + + Obtain the video output streams supported by the current device from **videoProfiles** in the **CameraOutputCapability** class. Then, define video recording parameters and use **createVideoOutput()** to create a video output stream. + + ```ts + let videoProfilesArray = cameraOutputCapability.videoProfiles; + if (!videoProfilesArray) { + console.error("createOutput videoProfilesArray == null || undefined"); + } + + // Define video recording parameters. + let videoConfig = { + videoSourceType: media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, + profile: { + fileFormat: media.ContainerFormatType.CFT_MPEG_4, // Video file encapsulation format. Only MP4 is supported. + videoBitrate: 100000, // Video bit rate. + videoCodec: media.CodecMimeType.VIDEO_MPEG4, // Video file encoding format. Both MPEG-4 and AVC are supported. + videoFrameWidth: 640, // Video frame width. + videoFrameHeight: 480, // Video frame height. + videoFrameRate: 30 // Video frame rate. + }, + url: 'fd://35', + rotation: 0 + } + // Create an AVRecorder instance. 
+ let avRecorder; + media.createAVRecorder((error, recorder) => { + if (recorder != null) { + avRecorder = recorder; + console.info('createAVRecorder success'); + } else { + console.info(`createAVRecorder fail, error:${error}`); + } + }); + // Set video recording parameters. + avRecorder.prepare(videoConfig); + // Create a VideoOutput instance. + let videoOutput; + try { + videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId); + } catch (error) { + console.error('Failed to create the videoOutput instance. errorCode = ' + error.code); + } + ``` + +4. Start video recording. + + Call **start()** of the **VideoOutput** instance to start the video output stream, and then call **start()** of the **AVRecorder** instance to start recording. + + ``` + videoOutput.start(async (err) => { + if (err) { + console.error('Failed to start the video output ${err.message}'); + return; + } + console.info('Callback invoked to indicate the video output start success.'); + }); + + avRecorder.start().then(() => { + console.info('avRecorder start success'); + } + ``` + +5. Stop video recording. + + Call **stop()** of the **AVRecorder** instance to stop recording, and then call **stop()** of the **VideoOutput** instance to stop the video output stream. + + ```ts + videoRecorder.stop().then(() => { + console.info('stop success'); + } + + videoOutput.stop((err) => { + if (err) { + console.error('Failed to stop the video output ${err.message}'); + return; + } + console.info('Callback invoked to indicate the video output stop success.'); + }); + ``` + + +## Status Listening + +During camera application development, you can listen for the status of the video output stream, including recording start, recording end, and recording stream output errors. + +- Register the 'frameStart' event to listen for recording start events. 
This event can be registered when a **VideoOutput** object is created and is triggered when the bottom layer starts exposure for recording for the first time. Video recording is started as long as a result is returned. + + ```ts + videoOutput.on('frameStart', () => { + console.info('Video frame started'); + }) + ``` + +- Register the 'frameEnd' event to listen for recording end events. This event can be registered when a **VideoOutput** object is created and is triggered when the last frame of recording ends. Video recording ends as long as a result is returned. + + ```ts + videoOutput.on('frameEnd', () => { + console.info('Video frame ended'); + }) + ``` + +- Register the 'error' event to listen for video output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). + + ```ts + videoOutput.on('error', (error) => { + console.info(`Video output error code: ${error.code}`); + }) + ``` diff --git a/en/application-dev/media/camera-session-management.md b/en/application-dev/media/camera-session-management.md new file mode 100644 index 0000000000000000000000000000000000000000..1d0d2fcfe20428d33d72569cbf2212b830ad42e2 --- /dev/null +++ b/en/application-dev/media/camera-session-management.md @@ -0,0 +1,86 @@ +# Camera Session Management + +Before using the camera application for preview, photographing, video recording, and metadata, you must create a camera session. + +You can implement the following functions in the session: + +- Configure the camera input and output streams. This is mandatory for photographing. + Configuring an input stream is to add a device input, which means that the user selects a camera for photographing. Configuring an output stream is to select a data output mode. For example, to implement photographing, you must configure both the preview stream and photo stream as the output stream. 
The data of the preview stream is displayed on the XComponent, and that of the photo stream is saved to the Gallery application through the **ImageReceiver** API. + +- Perform more operations on the camera hardware. For example, add the flash and adjust the focal length. For details about the supported configurations and APIs, see [Camera API Reference](../reference/apis/js-apis-camera.md). + +- Control session switching. The application can switch the camera mode by removing and adding output streams. For example, to switch from photographing to video recording, the application must remove the photo output stream and add the video output stream. + +After the session configuration is complete, the application must commit the configuration and start the session before using the camera functionalities. + +## How to Develop + +1. Call **createCaptureSession()** in the **CameraManager** class to create a session. + + ```ts + let captureSession; + try { + captureSession = cameraManager.createCaptureSession(); + } catch (error) { + console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code); + } + ``` + +2. Call **beginConfig()** in the **CaptureSession** class to start configuration for the session. + + ```ts + try { + captureSession.beginConfig(); + } catch (error) { + console.error('Failed to beginConfig. errorCode = ' + error.code); + } + ``` + +3. Configure the session. You can call **addInput()** and **addOutput()** in the **CaptureSession** class to add the input and output streams to the session, respectively. The code snippet below uses adding the preview stream **previewOutput** and photo stream **photoOutput** as an example to implement the photographing and preview mode. + + After the configuration, call **commitConfig()** and **start()** in the **CaptureSession** class in sequence to commit the configuration and start the session. 
+ + ```ts + try { + captureSession.addInput(cameraInput); + } catch (error) { + console.error('Failed to addInput. errorCode = ' + error.code); + } + try { + captureSession.addOutput(previewOutput); + } catch (error) { + console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code); + } + try { + captureSession.addOutput(photoOutput); + } catch (error) { + console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code); + } + await captureSession.commitConfig() ; + await captureSession.start().then(() => { + console.info('Promise returned to indicate the session start success.'); + }) + ``` + +4. Control the session. You can call **stop()** in the **CaptureSession** class to stop the session, and call **removeOutput()** and **addOutput()** in this class to switch to another session. The code snippet below uses removing the photo stream **photoOutput** and adding the video stream **videoOutput** as an example to complete the switching from photographing to recording. + + ```ts + await captureSession.stop(); + try { + captureSession.beginConfig(); + } catch (error) { + console.error('Failed to beginConfig. errorCode = ' + error.code); + } + // Remove the photo output stream from the session. + try { + captureSession.removeOutput(photoOutput); + } catch (error) { + console.error('Failed to removeOutput(photoOutput). errorCode = ' + error.code); + } + // Add the video output stream to the session. + try { + captureSession.addOutput(videoOutput); + } catch (error) { + console.error('Failed to addOutput(videoOutput). 
errorCode = ' + error.code); + } + ``` diff --git a/en/application-dev/media/camera-shooting-case.md b/en/application-dev/media/camera-shooting-case.md new file mode 100644 index 0000000000000000000000000000000000000000..da2588b10b844fd2a9432da909d1d387b8193d9f --- /dev/null +++ b/en/application-dev/media/camera-shooting-case.md @@ -0,0 +1,239 @@ +# Camera Photographing Sample + +## Development Process + +After obtaining the output stream capabilities supported by the camera, create a photo stream. The development process is as follows: + +![Photographing Development Process](figures/photographing-development-process.png) + +## Sample Code + +```ts +import camera from '@ohos.multimedia.camera' +import image from '@ohos.multimedia.image' +import media from '@ohos.multimedia.media' + +// Create a CameraManager instance. +context: any = getContext(this) +let cameraManager = camera.getCameraManager(this.context) +if (!cameraManager) { + console.error("camera.getCameraManager error") + return; +} +// Listen for camera status changes. +cameraManager.on('cameraStatus', (cameraStatusInfo) => { + console.info(`camera : ${cameraStatusInfo.camera.cameraId}`); + console.info(`status: ${cameraStatusInfo.status}`); +}) + +// Obtain the camera list. +let cameraArray = cameraManager.getSupportedCameras(); +if (cameraArray.length <= 0) { + console.error("cameraManager.getSupportedCameras error") + return; +} + +for (let index = 0; index < cameraArray.length; index++) { + console.info('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID. + console.info('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position. + console.info('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type. + console.info('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type. +} + +// Create a camera input stream. 
+let cameraInput +try { + cameraInput = cameraManager.createCameraInput(cameraArray[0]); +} catch (error) { + console.error('Failed to createCameraInput errorCode = ' + error.code); +} + +// Listen for camera input errors. +let cameraDevice = cameraArray[0]; +cameraInput.on('error', cameraDevice, (error) => { + console.info(`Camera input error code: ${error.code}`); +}) + +// Open the camera. +await cameraInput.open(); + +// Obtain the output stream capabilities supported by the camera. +let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]); +if (!cameraOutputCap) { + console.error("cameraManager.getSupportedOutputCapability error") + return; +} +console.info("outputCapability: " + JSON.stringify(cameraOutputCap)); + +let previewProfilesArray = cameraOutputCap.previewProfiles; +if (!previewProfilesArray) { + console.error("createOutput previewProfilesArray == null || undefined") +} + +let photoProfilesArray = cameraOutputCap.photoProfiles; +if (!photoProfilesArray) { + console.error("createOutput photoProfilesArray == null || undefined") +} + +// Create a preview output stream. For details about the surfaceId parameter, see the XComponent. The preview stream is the surface provided by the XComponent. +let previewOutput +try { + previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId) +} catch (error) { + console.error("Failed to create the PreviewOutput instance.") +} + +// Listen for preview output errors. +previewOutput.on('error', (error) => { + console.info(`Preview output error code: ${error.code}`); +}) + +// Create an ImageReceiver instance and set photographing parameters. Wherein, the resolution must be one of the photographing resolutions supported by the current device, which are obtained by photoProfilesArray. +let imageReceiver = await image.createImageReceiver(1920, 1080, 4, 8) +// Obtain the surface ID for displaying the photos. 
+let photoSurfaceId = await imageReceiver.getReceivingSurfaceId() +// Create a photo output stream. +let photoOutput +try { + photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId) +} catch (error) { + console.error('Failed to createPhotoOutput errorCode = ' + error.code); +} +// Create a session. +let captureSession +try { + captureSession = cameraManager.createCaptureSession() +} catch (error) { + console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code); +} + +// Listen for session errors. +captureSession.on('error', (error) => { + console.info(`Capture session error code: ${error.code}`); +}) + +// Start configuration for the session. +try { + captureSession.beginConfig() +} catch (error) { + console.error('Failed to beginConfig. errorCode = ' + error.code); +} + +// Add the camera input stream to the session. +try { + captureSession.addInput(cameraInput) +} catch (error) { + console.error('Failed to addInput. errorCode = ' + error.code); +} + +// Add the preview output stream to the session. +try { + captureSession.addOutput(previewOutput) +} catch (error) { + console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code); +} + +// Add the photo output stream to the session. +try { + captureSession.addOutput(photoOutput) +} catch (error) { + console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code); +} + +// Commit the session configuration. +await captureSession.commitConfig() + +// Start the session. +await captureSession.start().then(() => { + console.info('Promise returned to indicate the session start success.'); +}) +// Check whether the camera has flash. +let flashStatus +try { + flashStatus = captureSession.hasFlash() +} catch (error) { + console.error('Failed to hasFlash. 
errorCode = ' + error.code); +} +console.info('Promise returned with the flash light support status:' + flashStatus); + +if (flashStatus) { + // Check whether the auto flash mode is supported. + let flashModeStatus + try { + let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO) + flashModeStatus = status + } catch (error) { + console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code); + } + if(flashModeStatus) { + // Set the flash mode to auto. + try { + captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO) + } catch (error) { + console.error('Failed to set the flash mode. errorCode = ' + error.code); + } + } +} + +// Check whether the continuous auto focus is supported. +let focusModeStatus +try { + let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO) + focusModeStatus = status +} catch (error) { + console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code); +} + +if (focusModeStatus) { + // Set the focus mode to continuous auto focus. + try { + captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO) + } catch (error) { + console.error('Failed to set the focus mode. errorCode = ' + error.code); + } +} + +// Obtain the zoom ratio range supported by the camera. +let zoomRatioRange +try { + zoomRatioRange = captureSession.getZoomRatioRange() +} catch (error) { + console.error('Failed to get the zoom ratio range. errorCode = ' + error.code); +} + +// Set a zoom ratio. +try { + captureSession.setZoomRatio(zoomRatioRange[0]) +} catch (error) { + console.error('Failed to set the zoom ratio value. errorCode = ' + error.code); +} +let settings = { + quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the photo quality to high. + rotation: camera.ImageRotation.ROTATION_0 // Set the rotation angle of the photo to 0. +} +// Use the current photographing settings to take photos. 
+photoOutput.capture(settings, async (err) => { + if (err) { + console.error('Failed to capture the photo ${err.message}'); + return; + } + console.info('Callback invoked to indicate the photo capture request success.'); +}); +// Stop the session. +captureSession.stop() + +// Release the camera input stream. +cameraInput.close() + +// Release the preview output stream. +previewOutput.release() + +// Release the photo output stream. +photoOutput.release() + +// Release the session. +captureSession.release() + +// Set the session to null. +captureSession = null +``` diff --git a/en/application-dev/media/camera-shooting.md b/en/application-dev/media/camera-shooting.md new file mode 100644 index 0000000000000000000000000000000000000000..9026267ebc0a6950ced6b5092ce88e8ed31d2e24 --- /dev/null +++ b/en/application-dev/media/camera-shooting.md @@ -0,0 +1,159 @@ +# Camera Photographing + +Photographing is an important function of the camera application. Based on the complex logic of the camera hardware, the camera module provides APIs for you to set information such as resolution, flash, focal length, photo quality, and rotation angle. + +## How to Develop + +Read [Camera](../reference/apis/js-apis-camera.md) for the API reference. + +1. Import the image module. The APIs provided by this module are used to obtain the surface ID and create a photo output stream. + + ```ts + import image from '@ohos.multimedia.image'; + ``` + +2. Obtain the surface ID. + + Call **createImageReceiver()** of the image module to create an **ImageReceiver** instance, and use **getReceivingSurfaceId()** of the instance to obtain the surface ID, which is associated with the photo output stream to process the data output by the stream. 
+ + ```ts + function getImageReceiverSurfaceId() { + let receiver = image.createImageReceiver(640, 480, 4, 8); + console.info('before ImageReceiver check'); + if (receiver !== undefined) { + console.info('ImageReceiver is ok'); + let photoSurfaceId = receiver.getReceivingSurfaceId(); + console.info('ImageReceived id: ' + JSON.stringify(photoSurfaceId)); + } else { + console.info('ImageReceiver is not ok'); + } + } + ``` + +3. Create a photo output stream. + + Obtain the photo output streams supported by the current device from **photoProfiles** in **CameraOutputCapability**, and then call **createPhotoOutput()** to pass in a supported output stream and the surface ID obtained in step 1 to create a photo output stream. + + ```ts + let photoProfilesArray = cameraOutputCapability.photoProfiles; + if (!photoProfilesArray) { + console.error("createOutput photoProfilesArray == null || undefined"); + } + let photoOutput; + try { + photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId); + } catch (error) { + console.error('Failed to createPhotoOutput errorCode = ' + error.code); + } + ``` + +4. Set camera parameters. + + You can set camera parameters to adjust photographing functions, including the flash, zoom ratio, and focal length. + + ```ts + // Check whether the camera has flash. + let flashStatus; + try { + flashStatus = captureSession.hasFlash(); + } catch (error) { + console.error('Failed to hasFlash. errorCode = ' + error.code); + } + console.info('Promise returned with the flash light support status:' + flashStatus); + if (flashStatus) { + // Check whether the auto flash mode is supported. + let flashModeStatus; + try { + let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO); + flashModeStatus = status; + } catch (error) { + console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code); + } + if(flashModeStatus) { + // Set the flash mode to auto. 
+ try { + captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO); + } catch (error) { + console.error('Failed to set the flash mode. errorCode = ' + error.code); + } + } + } + // Check whether the continuous auto focus is supported. + let focusModeStatus; + try { + let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO); + focusModeStatus = status; + } catch (error) { + console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code); + } + if (focusModeStatus) { + // Set the focus mode to continuous auto focus. + try { + captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO); + } catch (error) { + console.error('Failed to set the focus mode. errorCode = ' + error.code); + } + } + // Obtain the zoom ratio range supported by the camera. + let zoomRatioRange; + try { + zoomRatioRange = captureSession.getZoomRatioRange(); + } catch (error) { + console.error('Failed to get the zoom ratio range. errorCode = ' + error.code); + } + // Set a zoom ratio. + try { + captureSession.setZoomRatio(zoomRatioRange[0]); + } catch (error) { + console.error('Failed to set the zoom ratio value. errorCode = ' + error.code); + } + ``` + +5. Trigger photographing. + + Call **capture()** in the **PhotoOutput** class to capture a photo. In this API, the first parameter specifies the settings (for example, photo quality and rotation angle) for photographing, and the second parameter is a callback function. + + ```ts + let settings = { + quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the photo quality to high. + rotation: camera.ImageRotation.ROTATION_0, // Set the rotation angle of the photo to 0. + location: captureLocation, // Set the geolocation information of the photo. + mirror: false // Disable mirroring (disabled by default). 
+ }; + photoOutput.capture(settings, async (err) => { + if (err) { + console.error('Failed to capture the photo ${err.message}'); + return; + } + console.info('Callback invoked to indicate the photo capture request success.'); + }); + ``` + +## Status Listening + +During camera application development, you can listen for the status of the photo output stream, including the start of the photo stream, the start and end of the photo frame, and the errors of the photo output stream. + +- Register the 'captureStart' event to listen for photographing start events. This event can be registered when a **PhotoOutput** object is created and is triggered when the bottom layer starts exposure for photographing for the first time. The capture ID is returned. + + ```ts + photoOutput.on('captureStart', (captureId) => { + console.info(`photo capture stated, captureId : ${captureId}`); + }) + ``` + +- Register the 'captureEnd' event to listen for photographing end events. This event can be registered when a **PhotoOutput** object is created and is triggered when the photographing is complete. [CaptureEndInfo](../reference/apis/js-apis-camera.md#captureendinfo) is returned. + + ```ts + photoOutput.on('captureEnd', (captureEndInfo) => { + console.info(`photo capture end, captureId : ${captureEndInfo.captureId}`); + console.info(`frameCount : ${captureEndInfo.frameCount}`); + }) + ``` + +- Register the 'error' event to listen for photo output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode). 
+ + ```ts + photoOutput.on('error', (error) => { + console.info(`Photo output error code: ${error.code}`); + }) + ``` diff --git a/en/application-dev/media/camera.md b/en/application-dev/media/camera.md deleted file mode 100644 index 0622db9c3ce6d962001b47ca6d2e6d1bc2aaff7c..0000000000000000000000000000000000000000 --- a/en/application-dev/media/camera.md +++ /dev/null @@ -1,511 +0,0 @@ -# Camera Development - -## When to Use - -With the APIs provided by the **Camera** module, you can access and operate camera devices and develop new functions. Common operations include preview, photographing, and video recording. You can also implement flash control, exposure time control, focus mode control, zoom control, and much more. - -Before calling camera APIs, be familiar with the following concepts: - -- **Static camera capabilities**: A series of parameters used to describe inherent capabilities of a camera, such as orientation and supported resolution. -- **Physical camera**: An independent camera device. The physical camera ID is a string that uniquely identifies a physical camera. -- **Asynchronous operation**: A non-blocking operation that allows other operations to execute before it completes. To prevent the UI thread from being blocked, some **Camera** calls are asynchronous. Each asynchronous API provides the callback and promise functions. - -## How to Develop - -### Available APIs - -For details about the APIs, see [Camera Management](../reference/apis/js-apis-camera.md). - -### Full-Process Scenario - -The full process includes applying for permissions, creating an instance, setting parameters, managing sessions, taking photos, recording videos, and releasing resources. - -#### Applying for Permissions - -You must apply for the permissions for your application to access the camera device and other functions. The following table lists camera-related permissions. 
- -| Permission| Attribute Value | -| -------- | ------------------------------ | -| Camera| ohos.permission.CAMERA | -| Call recording| ohos.permission.MICROPHONE | -| Storage| ohos.permission.WRITE_MEDIA | -| Read| ohos.permission.READ_MEDIA | -| Location| ohos.permission.MEDIA_LOCATION | - -The code snippet is as follows: - -```typescript -const PERMISSIONS: Array = [ - 'ohos.permission.CAMERA', - 'ohos.permission.MICROPHONE', - 'ohos.permission.MEDIA_LOCATION', - 'ohos.permission.READ_MEDIA', - 'ohos.permission.WRITE_MEDIA' -] - -function applyPermission() { - console.info('[permission] get permission'); - globalThis.abilityContext.requestPermissionFromUser(PERMISSIONS) - } -``` - -#### Creating an Instance - -You must create an independent **CameraManager** instance before performing camera operations. If this operation fails, the camera may be occupied or unusable. If the camera is occupied, wait until it is released. You can call **getSupportedCameras()** to obtain the list of cameras supported by the current device. The list stores all camera IDs of the current device. Each of these IDs can be used to create an independent **CameraManager** instance. If the list is empty, no camera is available for the current device and subsequent operations cannot be performed. The camera has preview, shooting, video recording, and metadata output streams. You can use **getSupportedOutputCapability()** to obtain the output stream capabilities of the camera and configure them in the **profile** field in **CameraOutputCapability**. The procedure for creating a **CameraManager** instance is as follows: - -```typescript -import camera from '@ohos.multimedia.camera' -import image from '@ohos.multimedia.image' -import media from '@ohos.multimedia.media' - -// Create a CameraManager instance. 
-context: any = getContext(this) -let cameraManager = camera.getCameraManager(this.context) -if (!cameraManager) { - console.error("camera.getCameraManager error") - return; -} -// Listen for camera state changes. -cameraManager.on('cameraStatus', (cameraStatusInfo) => { - console.log(`camera : ${cameraStatusInfo.camera.cameraId}`); - console.log(`status: ${cameraStatusInfo.status}`); -}) - -// Obtain the camera list. -let cameraArray = cameraManager.getSupportedCameras(); -if (cameraArray.length <= 0) { - console.error("cameraManager.getSupportedCameras error") - return; -} - -for (let index = 0; index < cameraArray.length; index++) { - console.log('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID. - console.log('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position. - console.log('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type. - console.log('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type. -} - -// Create a camera input stream. -let cameraInput -try { - cameraInput = cameraManager.createCameraInput(cameraArray[0]); -} catch () { - console.error('Failed to createCameraInput errorCode = ' + error.code); -} - -// Listen for CameraInput errors. -let cameraDevice = cameraArray[0]; -cameraInput.on('error', cameraDevice, (error) => { - console.log(`Camera input error code: ${error.code}`); -}) - -// Open the camera. -await cameraInput.open(); - -// Obtain the output stream capabilities supported by the camera. 
-let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]); -if (!cameraOutputCap) { - console.error("cameraManager.getSupportedOutputCapability error") - return; -} -console.info("outputCapability: " + JSON.stringify(cameraOutputCap)); - -let previewProfilesArray = cameraOutputCap.previewProfiles; -if (!previewProfilesArray) { - console.error("createOutput previewProfilesArray == null || undefined") -} - -let photoProfilesArray = cameraOutputCap.photoProfiles; -if (!photoProfilesArray) { - console.error("createOutput photoProfilesArray == null || undefined") -} - -let videoProfilesArray = cameraOutputCap.videoProfiles; -if (!videoProfilesArray) { - console.error("createOutput videoProfilesArray == null || undefined") -} - -let metadataObjectTypesArray = cameraOutputCap.supportedMetadataObjectTypes; -if (!metadataObjectTypesArray) { - console.error("createOutput metadataObjectTypesArray == null || undefined") -} - -// Create a preview stream. For details about the surfaceId parameter, see the XComponent section. The preview stream is the surface provided by the XComponent. -let previewOutput -try { - previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId) -} catch (error) { - console.error("Failed to create the PreviewOutput instance.") -} - -// Listen for PreviewOutput errors. -previewOutput.on('error', (error) => { - console.log(`Preview output error code: ${error.code}`); -}) - -// Create an ImageReceiver instance and set photo parameters. Wherein, the resolution must be one of the photographing resolutions supported by the current device, which are obtained by photoProfilesArray. -let imageReceiver = await image.createImageReceiver(1920, 1080, 4, 8) -// Obtain the surface ID for displaying the photos. -let photoSurfaceId = await imageReceiver.getReceivingSurfaceId() -// Create a photographing output stream. 
-let photoOutput -try { - photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId) -} catch (error) { - console.error('Failed to createPhotoOutput errorCode = ' + error.code); -} - -// Define video recording parameters. -let videoConfig = { - audioSourceType: 1, - videoSourceType: 1, - profile: { - audioBitrate: 48000, - audioChannels: 2, - audioCodec: 'audio/mp4v-es', - audioSampleRate: 48000, - durationTime: 1000, - fileFormat: 'mp4', - videoBitrate: 48000, - videoCodec: 'video/mp4v-es', - videoFrameWidth: 640, - videoFrameHeight: 480, - videoFrameRate: 30 - }, - url: 'file:///data/media/01.mp4', - orientationHint: 0, - maxSize: 100, - maxDuration: 500, - rotation: 0 -} - -// Create a video recording output stream. -let videoRecorder -media.createVideoRecorder().then((recorder) => { - console.log('createVideoRecorder called') - videoRecorder = recorder -}) -// Set video recording parameters. -videoRecorder.prepare(videoConfig) -// Obtain the surface ID for video recording. -let videoSurfaceId -videoRecorder.getInputSurface().then((id) => { - console.log('getInputSurface called') - videoSurfaceId = id -}) - -// Create a VideoOutput instance. -let videoOutput -try { - videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId) -} catch (error) { - console.error('Failed to create the videoOutput instance. errorCode = ' + error.code); -} - -// Listen for VideoOutput errors. -videoOutput.on('error', (error) => { - console.log(`Preview output error code: ${error.code}`); -}) -``` -Surfaces must be created in advance for the preview, shooting, and video recording stream. The preview stream is the surface provided by the **XComponent**, the shooting stream is the surface provided by **ImageReceiver**, and the video recording stream is the surface provided by **VideoRecorder**. - -**XComponent** - -```typescript -mXComponentController: XComponentController = new XComponentController // Create an XComponentController. 
- -build() { - Flex() { - XComponent({ // Create an XComponent. - id: '', - type: 'surface', - libraryname: '', - controller: this.mXComponentController - }) - .onload(() => { // Set the onload callback. - // Set the surface width and height (1920 x 1080). For details about how to set the preview size, see the preview resolutions supported by the current device, which are obtained by previewProfilesArray. - this.mXComponentController.setXComponentSurfaceSize({surfaceWidth:1920,surfaceHeight:1080}) - // Obtain the surface ID. - globalThis.surfaceId = mXComponentController.getXComponentSurfaceId() - }) - .width('1920px') // Set the width of the XComponent. - .height('1080px') // Set the height of the XComponent. - } -} -``` - -**ImageReceiver** - -```typescript -function getImageReceiverSurfaceId() { - let receiver = image.createImageReceiver(640, 480, 4, 8) - console.log(TAG + 'before ImageReceiver check') - if (receiver !== undefined) { - console.log('ImageReceiver is ok') - surfaceId1 = receiver.getReceivingSurfaceId() - console.log('ImageReceived id: ' + JSON.stringify(surfaceId1)) - } else { - console.log('ImageReceiver is not ok') - } - } -``` - -**VideoRecorder** - -```typescript -function getVideoRecorderSurface() { - await getFd('CameraManager.mp4'); - mVideoConfig.url = mFdPath; - media.createVideoRecorder((err, recorder) => { - console.info('Entering create video receiver') - mVideoRecorder = recorder - console.info('videoRecorder is :' + JSON.stringify(mVideoRecorder)) - console.info('videoRecorder.prepare called.') - mVideoRecorder.prepare(mVideoConfig, (err) => { - console.info('videoRecorder.prepare success.') - mVideoRecorder.getInputSurface((err, id) => { - console.info('getInputSurface called') - mVideoSurface = id - console.info('getInputSurface surfaceId: ' + JSON.stringify(mVideoSurface)) - }) - }) - }) - } -``` - -#### Managing Sessions - -##### Creating a Session - -```typescript -// Create a session. 
-let captureSession -try { - captureSession = cameraManager.createCaptureSession() -} catch (error) { - console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code); -} - -// Listen for session errors. -captureSession.on('error', (error) => { - console.log(`Capture session error code: ${error.code}`); -}) - -// Start configuration for the session. -try { - captureSession.beginConfig() -} catch (error) { - console.error('Failed to beginConfig. errorCode = ' + error.code); -} - -// Add the camera input stream to the session. -try { - captureSession.addInput(cameraInput) -} catch (error) { - console.error('Failed to addInput. errorCode = ' + error.code); -} - -// Add the preview input stream to the session. -try { - captureSession.addOutput(previewOutput) -} catch (error) { - console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code); -} - -// Add the photographing output stream to the session. -try { - captureSession.addOutput(photoOutput) -} catch (error) { - console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code); -} - -// Commit the session configuration. -await captureSession.commitConfig() - -// Start the session. -await captureSession.start().then(() => { - console.log('Promise returned to indicate the session start success.'); -}) -``` - -##### Switching a Session - -```typescript -// Stop the session. -await captureSession.stop() - -// Start configuration for the session. -try { - captureSession.beginConfig() -} catch (error) { - console.error('Failed to beginConfig. errorCode = ' + error.code); -} - -// Remove the photographing output stream from the session. -try { - captureSession.removeOutput(photoOutput) -} catch (error) { - console.error('Failed to removeOutput(photoOutput). errorCode = ' + error.code); -} - -// Add a video recording output stream to the session. -try { - captureSession.addOutput(videoOutput) -} catch (error) { - console.error('Failed to addOutput(videoOutput). 
errorCode = ' + error.code); -} - -// Commit the session configuration. -await captureSession.commitConfig() - -// Start the session. -await captureSession.start().then(() => { - console.log('Promise returned to indicate the session start success.'); -}) -``` - -#### Setting Parameters - -```typescript -// Check whether the camera has flash. -let flashStatus -try { - flashStatus = captureSession.hasFlash() -} catch (error) { - console.error('Failed to hasFlash. errorCode = ' + error.code); -} -console.log('Promise returned with the flash light support status:' + flashStatus); - -if (flashStatus) { - // Check whether the auto flash mode is supported. - let flashModeStatus - try { - let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO) - flashModeStatus = status - } catch (error) { - console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code); - } - if(flashModeStatus) { - // Set the flash mode to auto. - try { - captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO) - } catch (error) { - console.error('Failed to set the flash mode. errorCode = ' + error.code); - } - } -} - -// Check whether the continuous auto focus is supported. -let focusModeStatus -try { - let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO) - focusModeStatus = status -} catch (error) { - console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code); -} - -if (focusModeStatus) { - // Set the focus mode to continuous auto focus. - try { - captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO) - } catch (error) { - console.error('Failed to set the focus mode. errorCode = ' + error.code); - } -} - -// Obtain the zoom ratio range supported by the camera. -let zoomRatioRange -try { - zoomRatioRange = captureSession.getZoomRatioRange() -} catch (error) { - console.error('Failed to get the zoom ratio range. 
errorCode = ' + error.code); -} - -// Set a zoom ratio. -try { - captureSession.setZoomRatio(zoomRatioRange[0]) -} catch (error) { - console.error('Failed to set the zoom ratio value. errorCode = ' + error.code); -} -``` - -#### Taking Photos - -```typescript -let settings = { - quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the image quality to high. - rotation: camera.ImageRotation.ROTATION_0 // Set the image rotation angle to 0. -} -// Use the current photographing settings to take photos. -photoOutput.capture(settings, async (err) => { - if (err) { - console.error('Failed to capture the photo ${err.message}'); - return; - } - console.log('Callback invoked to indicate the photo capture request success.'); -}); -``` - -#### Recording Videos - -```typescript -// Start the video recording output stream. -videoOutput.start(async (err) => { - if (err) { - console.error('Failed to start the video output ${err.message}'); - return; - } - console.log('Callback invoked to indicate the video output start success.'); -}); - -// Start video recording. -videoRecorder.start().then(() => { - console.info('videoRecorder start success'); -} - -// Stop video recording. -videoRecorder.stop().then(() => { - console.info('stop success'); -} - -// Stop the video recording output stream. -videoOutput.stop((err) => { - if (err) { - console.error('Failed to stop the video output ${err.message}'); - return; - } - console.log('Callback invoked to indicate the video output stop success.'); -}); -``` - -For details about the APIs used for saving photos, see [Image Processing](image.md#using-imagereceiver). - -#### Releasing Resources - -```typescript -// Stop the session. -captureSession.stop() - -// Release the camera input stream. -cameraInput.close() - -// Release the preview output stream. -previewOutput.release() - -// Release the photographing output stream. -photoOutput.release() - -// Release the video recording output stream. 
-videoOutput.release() - -// Release the session. -captureSession.release() - -// Set the session to null. -captureSession = null -``` - -## Process Flowchart - -The following figure shows the process of using the camera. -![camera_framework process](figures/camera_framework_process.png) diff --git a/en/application-dev/media/distributed-audio-playback.md b/en/application-dev/media/distributed-audio-playback.md new file mode 100644 index 0000000000000000000000000000000000000000..c56420de740e545168d009b5c743f2790146c475 --- /dev/null +++ b/en/application-dev/media/distributed-audio-playback.md @@ -0,0 +1,101 @@ +# Distributed Audio Playback (for System Applications Only) + +Distributed audio playback enables an application to continue audio playback on another device in the same network. + +You can use distributed audio playback to transfer all audio streams or the specified audio stream being played on the current device to a remote device. + +## How to Develop + +Before continuing audio playback on another device in the same network, you must obtain the device list on the network and listen for device connection state changes. For details, see [Audio Output Device Management](audio-output-device-management.md). + +When obtaining the device list on the network, you can specify **DeviceFlag** to filter out the required devices. + +| Name| Description| +| -------- | -------- | +| NONE_DEVICES_FLAG9+ | None. This is a system API.| +| OUTPUT_DEVICES_FLAG | Local output device.| +| INPUT_DEVICES_FLAG | Local input device.| +| ALL_DEVICES_FLAG | Local input and output device.| +| DISTRIBUTED_OUTPUT_DEVICES_FLAG9+ | Remote output device. This is a system API.| +| DISTRIBUTED_INPUT_DEVICES_FLAG9+ | Remote input device. This is a system API.| +| ALL_DISTRIBUTED_DEVICES_FLAG9+ | Remote input and output device. This is a system API.| + +For details about the API reference, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9). 
+ +### Continuing the Playing of All Audio Streams + +1. [Obtain the output device information](audio-output-device-management.md#obtaining-output-device-information). + +2. Create an **AudioDeviceDescriptor** instance to describe an audio output device. + +3. Call **selectOutputDevice** to select a remote device, on which all the audio streams will continue playing. + +```ts +let outputAudioDeviceDescriptor = [{ + deviceRole: audio.DeviceRole.OUTPUT_DEVICE, + deviceType: audio.DeviceType.SPEAKER, + id: 1, + name: "", + address: "", + sampleRates: [44100], + channelCounts: [2], + channelMasks: [0], + networkId: audio.LOCAL_NETWORK_ID, + interruptGroupId: 1, + volumeGroupId: 1, +}]; + +async function selectOutputDevice() { + audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor, (err) => { + if (err) { + console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Invoke selectOutputDevice succeeded.'); + } + }); +} +``` + +### Continuing the Playing of the Specified Audio Stream + +1. [Obtain the output device information](audio-output-device-management.md#obtaining-output-device-information). + +2. Create an **AudioRendererFilter** instance, with **uid** to specify an application and **rendererId** to specify an audio stream. + +3. Create an **AudioDeviceDescriptor** instance to describe an audio output device. + +4. Call **selectOutputDeviceByFilter** to select a remote device, on which the specified audio stream will continue playing. 
+ +```ts +let outputAudioRendererFilter = { + uid: 20010041, + rendererInfo: { + content: audio.ContentType.CONTENT_TYPE_MUSIC, + usage: audio.StreamUsage.STREAM_USAGE_MEDIA, + rendererFlags: 0 }, + rendererId: 0 }; + +let outputAudioDeviceDescriptor = [{ + deviceRole: audio.DeviceRole.OUTPUT_DEVICE, + deviceType: audio.DeviceType.SPEAKER, + id: 1, + name: "", + address: "", + sampleRates: [44100], + channelCounts: [2], + channelMasks: [0], + networkId: audio.LOCAL_NETWORK_ID, + interruptGroupId: 1, + volumeGroupId: 1, +}]; + +async function selectOutputDeviceByFilter() { + audioRoutingManager.selectOutputDeviceByFilter(outputAudioRendererFilter, outputAudioDeviceDescriptor, (err) => { + if (err) { + console.error(`Invoke selectOutputDeviceByFilter failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Invoke selectOutputDeviceByFilter succeeded.'); + } + }); +} +``` diff --git a/en/application-dev/media/distributed-avsession-overview.md b/en/application-dev/media/distributed-avsession-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..ff293ed7332d0a9c5e66632f91c943af42d28030 --- /dev/null +++ b/en/application-dev/media/distributed-avsession-overview.md @@ -0,0 +1,54 @@ +# Distributed AVSession Overview + +With distributed AVSession, OpenHarmony allows users to project locally played media to a distributed device for a better playback effect. For example, users can project audio played on a tablet to a smart speaker. + +After the user initiates a projection, the media information is synchronized to the distributed device in real time, and the user can control the playback (for example, previous, next, play, and pause) on the distributed device. From the perspective of the user, the playback control operation on the distributed device is the same as that on the local device. 
+ + +## Interaction Process + +After the local device is paired with a distributed device, the controller on the local device projects media to the distributed device through AVSessionManager, thereby implementing a distributed AVSession. The interaction process is shown below. + +![Distributed AVSession Interaction Process](figures/distributed-avsession-interaction-process.png) + +The AVSession service on the distributed device automatically creates an **AVSession** object for information synchronization with the local device. The information to synchronize includes the session information, control commands, and events. + +## Distributed AVSession Process + +After the user triggers a projection, the remote device automatically creates an **AVSession** object to associate it with that on the local device. The detailed process is as follows: + +1. After receiving an audio device switching command, the AVSession service on the local device synchronizes the session information to the distributed device. + +2. The controller (for example, Media Controller) on the distributed device detects the new **AVSession** object and creates an **AVSessionController** object for it. + +3. Through the **AVSessionController** object, the controller on the distributed device sends a control command to the **AVSession** object on the local device. + +4. Upon the receipt of the control command, the **AVSession** object on the local device triggers a callback to the local audio application. + +5. The **AVSession** object on the local device synchronizes the new session information to the controller on the distributed device in real time. + +6. When the remote device is disconnected, the audio stream is switched back to the local device and the playback is paused. (The audio module completes the switchback, and the AVSession service instructs the application to pause the playback.) 
+ +## Distributed AVSession Scenarios + +There are two scenarios for projection implemented using the distributed AVSession: + +- System projection: The controller (for example, Media Controller) initiates a projection. + + This type of projection takes effect for all applications. After a system projection, all audio on the local device is played from the distributed device by default. + +- Application projection: An audio and video application integrates the projection component to initiate a projection. (This scenario is not supported yet.) + + This type of projection takes effect for a single application. After an application projection, audio of the application on the local device is played from the distributed device, and audio of other applications is still played from the local device. + +Projection preemption is supported. If application A initiates a projection to a remote device and then application B initiates a projection to the same device, then audio of application B is played on the remote device. + +## Relationship Between Distributed AVSession and Distributed Audio Playback + +The internal logic for the distributed AVSession to implement projection is as follows: + +- APIs related to [distributed audio playback](distributed-audio-playback.md) are called to project audio streams to the distributed device. + +- The distributed capability is used to project the session metadata to the distributed device for display. + +Projection implemented by using the distributed AVSession not only enables audio to be played on the distributed device, but also enables media information to be displayed on the distributed device. It also allows the user to perform playback control on the distributed device. 
diff --git a/en/application-dev/media/figures/audio-capturer-state.png b/en/application-dev/media/figures/audio-capturer-state.png deleted file mode 100644 index 52b5556260dbf78c5e816b37013248a07e8dbbc6..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/audio-capturer-state.png and /dev/null differ diff --git a/en/application-dev/media/figures/audio-playback-interaction-diagram.png b/en/application-dev/media/figures/audio-playback-interaction-diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..b96179b6b610463bc34d2515b145a57b29e574cb Binary files /dev/null and b/en/application-dev/media/figures/audio-playback-interaction-diagram.png differ diff --git a/en/application-dev/media/figures/audio-renderer-state.png b/en/application-dev/media/figures/audio-renderer-state.png deleted file mode 100644 index 9ae30c2a9306dc85662405c36da9e11d07ed9a2a..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/audio-renderer-state.png and /dev/null differ diff --git a/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png b/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png new file mode 100644 index 0000000000000000000000000000000000000000..50ad902dd8b55a91a220e2705fea5674cd855ae6 Binary files /dev/null and b/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png differ diff --git a/en/application-dev/media/figures/audiocapturer-status-change.png b/en/application-dev/media/figures/audiocapturer-status-change.png new file mode 100644 index 0000000000000000000000000000000000000000..aadbc4fb6470b7cdc0f399ee5954a96c01a7f7c3 Binary files /dev/null and b/en/application-dev/media/figures/audiocapturer-status-change.png differ diff --git a/en/application-dev/media/figures/audiorenderer-status-change.png b/en/application-dev/media/figures/audiorenderer-status-change.png new file mode 100644 index 
0000000000000000000000000000000000000000..a721044f7aeccfed0260176963d192cac40dd8a6 Binary files /dev/null and b/en/application-dev/media/figures/audiorenderer-status-change.png differ diff --git a/en/application-dev/media/figures/avsession-interaction-process.png b/en/application-dev/media/figures/avsession-interaction-process.png new file mode 100644 index 0000000000000000000000000000000000000000..2347599b7d118c45c2d2eb58708729f91c4dc801 Binary files /dev/null and b/en/application-dev/media/figures/avsession-interaction-process.png differ diff --git a/en/application-dev/media/figures/bitmap-operation.png b/en/application-dev/media/figures/bitmap-operation.png new file mode 100644 index 0000000000000000000000000000000000000000..c5107dbabd86fdc29863d5f25947b447d9c1deeb Binary files /dev/null and b/en/application-dev/media/figures/bitmap-operation.png differ diff --git a/en/application-dev/media/figures/camera-development-model.png b/en/application-dev/media/figures/camera-development-model.png new file mode 100644 index 0000000000000000000000000000000000000000..fa97f369dda840cb474bc8fffbb7396b8a7b6508 Binary files /dev/null and b/en/application-dev/media/figures/camera-development-model.png differ diff --git a/en/application-dev/media/figures/camera-workflow.png b/en/application-dev/media/figures/camera-workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..31a7e814724cf97a80a5cc8b88778334ccb352fb Binary files /dev/null and b/en/application-dev/media/figures/camera-workflow.png differ diff --git a/en/application-dev/media/figures/camera_framework_process.png b/en/application-dev/media/figures/camera_framework_process.png deleted file mode 100644 index bf4b6806fb19e087318306dbc7f9a4b0576273cd..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/camera_framework_process.png and /dev/null differ diff --git a/en/application-dev/media/figures/cropping.jpeg b/en/application-dev/media/figures/cropping.jpeg new 
file mode 100644 index 0000000000000000000000000000000000000000..a564818815eb3fde13a40ef02d0811bd56803fb9 Binary files /dev/null and b/en/application-dev/media/figures/cropping.jpeg differ diff --git a/en/application-dev/media/figures/distributed-avsession-interaction-process.png b/en/application-dev/media/figures/distributed-avsession-interaction-process.png new file mode 100644 index 0000000000000000000000000000000000000000..d16e362db22857b2ddba3cdbf2142c3759f73fc8 Binary files /dev/null and b/en/application-dev/media/figures/distributed-avsession-interaction-process.png differ diff --git a/en/application-dev/media/figures/en-us_image_audio_player.png b/en/application-dev/media/figures/en-us_image_audio_player.png deleted file mode 100644 index 4edcec759e7b8507d605823f157ba9c6c1108fcd..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_player.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png b/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png deleted file mode 100644 index 8cd657cf19c48da5e52809bad387984f50d5a3c7..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png b/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png deleted file mode 100644 index 7c33fcc1723fcdcc468bd3a6004de8b03b20100b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_routing_manager.png b/en/application-dev/media/figures/en-us_image_audio_routing_manager.png deleted file mode 100644 index 710679f6cac0c30d06dffa97b0e80b3cebe80f79..0000000000000000000000000000000000000000 Binary files 
a/en/application-dev/media/figures/en-us_image_audio_routing_manager.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_state_machine.png b/en/application-dev/media/figures/en-us_image_audio_state_machine.png deleted file mode 100644 index 22b7aeaa1db5b369d3daf44854d7f7f9a00f775b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_state_machine.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_stream_manager.png b/en/application-dev/media/figures/en-us_image_audio_stream_manager.png deleted file mode 100644 index 1f326d4bd0798dd5ecc0b55130904cbf87d2ea1f..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_stream_manager.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_audio_volume_manager.png b/en/application-dev/media/figures/en-us_image_audio_volume_manager.png deleted file mode 100644 index 0d47fbfacce9c1ff48811e1cf5d764231bdb596b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_audio_volume_manager.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_avplayer_audio.png b/en/application-dev/media/figures/en-us_image_avplayer_audio.png deleted file mode 100644 index b5eb9b02a977d0e4551a236c7cc8a154710f5517..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avplayer_audio.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png b/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png deleted file mode 100644 index aa8afdbcbf142fd745cee03fc422caec51cfe41b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png and /dev/null differ diff --git 
a/en/application-dev/media/figures/en-us_image_avplayer_video.png b/en/application-dev/media/figures/en-us_image_avplayer_video.png deleted file mode 100644 index 54525ebed1d1792f43156ffbeb1ffa37f56d8237..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avplayer_video.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png b/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png deleted file mode 100644 index 7d5da3bdc91fe8fb7be9f0b4054f934ec054b8e6..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png b/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png deleted file mode 100644 index 7ffcb21f09365e9b072bdaf48f8b98d7d45a8aaa..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_avsession.png b/en/application-dev/media/figures/en-us_image_avsession.png deleted file mode 100644 index 3289bc4ca3c54eb3e99c9230c821380f8f7c0c5b..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_avsession.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_video_player.png b/en/application-dev/media/figures/en-us_image_video_player.png deleted file mode 100644 index f9b4aabdc7215f22788d92c68ef353fafffda1c3..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_video_player.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png b/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png 
deleted file mode 100644 index 3e81dcc18d1f47b6de087a7a88fd75b308ea51a0..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_video_recorder_zero.png b/en/application-dev/media/figures/en-us_image_video_recorder_zero.png deleted file mode 100644 index a7f7fa09392eb916132d891a84d62f31f0f27782..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_video_recorder_zero.png and /dev/null differ diff --git a/en/application-dev/media/figures/en-us_image_video_state_machine.png b/en/application-dev/media/figures/en-us_image_video_state_machine.png deleted file mode 100644 index c0595ed5120b632142d6da8841c9e45277b10f55..0000000000000000000000000000000000000000 Binary files a/en/application-dev/media/figures/en-us_image_video_state_machine.png and /dev/null differ diff --git a/en/application-dev/media/figures/horizontal-flip.jpeg b/en/application-dev/media/figures/horizontal-flip.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..f43e4f6ab2adc68bf0f90eaf8177d36ee91f32ac Binary files /dev/null and b/en/application-dev/media/figures/horizontal-flip.jpeg differ diff --git a/en/application-dev/media/figures/image-development-process.png b/en/application-dev/media/figures/image-development-process.png new file mode 100644 index 0000000000000000000000000000000000000000..47db9d3faf7f8bffc80f63995dc73d0ad32799e5 Binary files /dev/null and b/en/application-dev/media/figures/image-development-process.png differ diff --git a/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png b/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png new file mode 100644 index 0000000000000000000000000000000000000000..a1f404f67bf18d91c2cc42ab65d8c7c5f01518a8 Binary files /dev/null and 
b/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png differ diff --git a/en/application-dev/media/figures/local-avsession-interaction-process.png b/en/application-dev/media/figures/local-avsession-interaction-process.png new file mode 100644 index 0000000000000000000000000000000000000000..dfccf9c6874f26a7e030189191f34248b7230b1a Binary files /dev/null and b/en/application-dev/media/figures/local-avsession-interaction-process.png differ diff --git a/en/application-dev/media/figures/media-system-framework.png b/en/application-dev/media/figures/media-system-framework.png new file mode 100644 index 0000000000000000000000000000000000000000..f1b92795c05db2caa6869acfba865f585a947c19 Binary files /dev/null and b/en/application-dev/media/figures/media-system-framework.png differ diff --git a/en/application-dev/media/figures/offsets.jpeg b/en/application-dev/media/figures/offsets.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..ab4c87a69bae55a62feddc0ca61a0ef1081bf199 Binary files /dev/null and b/en/application-dev/media/figures/offsets.jpeg differ diff --git a/en/application-dev/media/figures/original-drawing.jpeg b/en/application-dev/media/figures/original-drawing.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..01a0b0d7022dfc0130029154fec7321bc62dfe36 Binary files /dev/null and b/en/application-dev/media/figures/original-drawing.jpeg differ diff --git a/en/application-dev/media/figures/photographing-development-process.png b/en/application-dev/media/figures/photographing-development-process.png new file mode 100644 index 0000000000000000000000000000000000000000..b7ee61acfa63da55ef1389212e090da14a091a68 Binary files /dev/null and b/en/application-dev/media/figures/photographing-development-process.png differ diff --git a/en/application-dev/media/figures/playback-status-change.png b/en/application-dev/media/figures/playback-status-change.png new file mode 100644 index 
0000000000000000000000000000000000000000..860764d3d15b93e544a6f27316584963acba2f0f Binary files /dev/null and b/en/application-dev/media/figures/playback-status-change.png differ diff --git a/en/application-dev/media/figures/recording-development-process.png b/en/application-dev/media/figures/recording-development-process.png new file mode 100644 index 0000000000000000000000000000000000000000..c29043a1f8b9255664969b4e0b0a1ca971d4e1f7 Binary files /dev/null and b/en/application-dev/media/figures/recording-development-process.png differ diff --git a/en/application-dev/media/figures/recording-status-change.png b/en/application-dev/media/figures/recording-status-change.png new file mode 100644 index 0000000000000000000000000000000000000000..9f15af9c1992e34fa7d750d08fd0245b6cb3ba67 Binary files /dev/null and b/en/application-dev/media/figures/recording-status-change.png differ diff --git a/en/application-dev/media/figures/rotate.jpeg b/en/application-dev/media/figures/rotate.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..5965abb46dc9648a3dfd9136e7cc0b5c5203e6a7 Binary files /dev/null and b/en/application-dev/media/figures/rotate.jpeg differ diff --git a/en/application-dev/media/figures/transparency.png b/en/application-dev/media/figures/transparency.png new file mode 100644 index 0000000000000000000000000000000000000000..b9b43939f0dad8ee40bf0b6b7e40ddf49d141c66 Binary files /dev/null and b/en/application-dev/media/figures/transparency.png differ diff --git a/en/application-dev/media/figures/vertical-flip.jpeg b/en/application-dev/media/figures/vertical-flip.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..8ef368d6bb914815a90c8d82352cbd6fd9ab505c Binary files /dev/null and b/en/application-dev/media/figures/vertical-flip.jpeg differ diff --git a/en/application-dev/media/figures/video-playback-interaction-diagram.png b/en/application-dev/media/figures/video-playback-interaction-diagram.png new file mode 100644 index 
0000000000000000000000000000000000000000..93778e5fd397820e92b03f60a01076f251348ee6 Binary files /dev/null and b/en/application-dev/media/figures/video-playback-interaction-diagram.png differ diff --git a/en/application-dev/media/figures/video-playback-status-change.png b/en/application-dev/media/figures/video-playback-status-change.png new file mode 100644 index 0000000000000000000000000000000000000000..860764d3d15b93e544a6f27316584963acba2f0f Binary files /dev/null and b/en/application-dev/media/figures/video-playback-status-change.png differ diff --git a/en/application-dev/media/figures/video-recording-interaction-diagram.png b/en/application-dev/media/figures/video-recording-interaction-diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..3fbbffe30f5ab06ba0f0a9e6487c76cecd5546c4 Binary files /dev/null and b/en/application-dev/media/figures/video-recording-interaction-diagram.png differ diff --git a/en/application-dev/media/figures/video-recording-status-change.png b/en/application-dev/media/figures/video-recording-status-change.png new file mode 100644 index 0000000000000000000000000000000000000000..9f15af9c1992e34fa7d750d08fd0245b6cb3ba67 Binary files /dev/null and b/en/application-dev/media/figures/video-recording-status-change.png differ diff --git a/en/application-dev/media/figures/zoom.jpeg b/en/application-dev/media/figures/zoom.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..977db6cfbc5b81f5396e4d81f8954a9f7d4168e4 Binary files /dev/null and b/en/application-dev/media/figures/zoom.jpeg differ diff --git a/en/application-dev/media/image-decoding.md b/en/application-dev/media/image-decoding.md new file mode 100644 index 0000000000000000000000000000000000000000..00665aa430fb0d2ab95007f29d39b8adc5c5433c --- /dev/null +++ b/en/application-dev/media/image-decoding.md @@ -0,0 +1,143 @@ +# Image Decoding + +Image decoding refers to the process of decoding an archived image in a supported format into a [pixel 
map](image-overview.md) for image display or [processing](image-transformation.md). Currently, the following image formats are supported: JPEG, PNG, GIF, RAW, WebP, BMP, and SVG. + +## How to Develop + +Read [Image](../reference/apis/js-apis-image.md#imagesource) for APIs related to image decoding. + +1. Import the image module. + + ```ts + import image from '@ohos.multimedia.image'; + ``` + +2. Obtain an image. + - Method 1: Obtain the sandbox path. For details about how to obtain the sandbox path, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). For details about the application sandbox and how to push files to the application sandbox, see [File Management](../file-management/app-sandbox-directory.md). + + ```ts + // Code on the stage model + const context = getContext(this); + const filePath = context.cacheDir + '/test.jpg'; + ``` + + ```ts + // Code on the FA model + import featureAbility from '@ohos.ability.featureAbility'; + + const context = featureAbility.getContext(); + const filePath = context.getCacheDir() + "/test.jpg"; + ``` + - Method 2: Obtain the file descriptor of the image through the sandbox path. For details, see [file.fs API Reference](../reference/apis/js-apis-file-fs.md). + To use this method, you must import the \@ohos.file.fs module first. + + ```ts + import fs from '@ohos.file.fs'; + ``` + + Then call **fs.openSync()** to obtain the file descriptor. 
+ + ```ts + // Code on the stage model + const context = getContext(this); + const filePath = context.cacheDir + '/test.jpg'; + const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE); + const fd = file?.fd; + ``` + + ```ts + // Code on the FA model + import featureAbility from '@ohos.ability.featureAbility'; + + const context = featureAbility.getContext(); + const filePath = context.getCacheDir() + "/test.jpg"; + const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE); + const fd = file?.fd; + ``` + - Method 3: Obtain the array buffer of the resource file through the resource manager. For details, see [ResourceManager API Reference](../reference/apis/js-apis-resource-manager.md#getrawfilecontent9-1). + + ```ts + // Code on the stage model + const context = getContext(this); + // Obtain a resource manager. + const resourceMgr = context.resourceManager; + ``` + + ```ts + // Code on the FA model + // Import the resourceManager module. + import resourceManager from '@ohos.resourceManager'; + const resourceMgr = await resourceManager.getResourceManager(); + ``` + + The method of obtaining the resource manager varies according to the application model. After obtaining the resource manager, call **resourceMgr.getRawFileContent()** to obtain the array buffer of the resource file. + + ```ts + const fileData = await resourceMgr.getRawFileContent('test.jpg'); + // Obtain the array buffer of the image. + const buffer = fileData.buffer; + ``` + +3. Create an **ImageSource** instance. + - Method 1: Create an **ImageSource** instance using the sandbox path. The sandbox path can be obtained by using method 1 in step 2. + + ```ts + // path indicates the obtained sandbox path. + const imageSource = image.createImageSource(filePath); + ``` + - Method 2: Create an **ImageSource** instance using the file descriptor. The file descriptor can be obtained by using method 2 in step 2. + + ```ts + // fd is the obtained file descriptor. 
+ const imageSource = image.createImageSource(fd); + ``` + - Method 3: Create an **ImageSource** instance using a buffer array. The buffer array can be obtained by using method 3 in step 2. + + ```ts + const imageSource = image.createImageSource(buffer); + ``` + +4. Set **DecodingOptions** and decode the image to obtain a pixel map. + + ```ts + let decodingOptions = { + editable: true, + desiredPixelFormat: 3, + } + // Create a pixel map and perform rotation and scaling on it. + const pixelMap = await imageSource.createPixelMap(decodingOptions); + ``` + + After the decoding is complete and the pixel map is obtained, you can perform subsequent [image processing](image-transformation.md). + +## Sample Code - Decoding an Image in Resource Files + +1. Obtain a resource manager. + + ```ts + const context = getContext(this); + // Obtain a resourceManager instance. + const resourceMgr = context.resourceManager; + ``` + +2. Obtain the array buffer of the **test.jpg** file in the **rawfile** folder. + + ```ts + const fileData = await resourceMgr.getRawFileContent('test.jpg'); + // Obtain the array buffer of the image. + const buffer = fileData.buffer; + ``` + +3. Create an **ImageSource** instance. + + ```ts + const imageSource = image.createImageSource(buffer); + ``` + +4. Create a **PixelMap** instance. + + ```ts + const pixelMap = await imageSource.createPixelMap(); + ``` + + \ No newline at end of file diff --git a/en/application-dev/media/image-encoding.md b/en/application-dev/media/image-encoding.md new file mode 100644 index 0000000000000000000000000000000000000000..96e23b6ba16c63bdaf282dbaf9abc01d95dd6221 --- /dev/null +++ b/en/application-dev/media/image-encoding.md @@ -0,0 +1,48 @@ +# Image Encoding + +Image encoding refers to the process of encoding a pixel map into an archived image in different formats (only in JPEG and WebP currently) for subsequent processing, such as storage and transmission. 
+ +## How to Develop + +Read [Image](../reference/apis/js-apis-image.md#imagepacker) for APIs related to image encoding. + +1. Create an **ImagePacker** object. + + ```ts + // Import the required module. + import image from '@ohos.multimedia.image'; + + const imagePackerApi = image.createImagePacker(); + ``` + +2. Set the encoding output stream and encoding parameters. + + **format** indicates the image encoding format, and **quality** indicates the image quality. The value ranges from 0 to 100, and the value 100 indicates the optimal quality. + + ```ts + let packOpts = { format:"image/jpeg", quality:98 }; + ``` + +3. [Create a PixelMap object or an ImageSource object](image-decoding.md). + +4. Encode the image and save the encoded image. + + Method 1: Use the **PixelMap** object for encoding. + + ```ts + imagePackerApi.packing(pixelMap, packOpts).then( data => { + // data is the file stream obtained after packing. You can write the file and save it to obtain an image. + }).catch(error => { + console.error('Failed to pack the image. And the error is: ' + error); + }) + ``` + + Method 2: Use the **ImageSource** object for encoding. + + ```ts + imagePackerApi.packing(imageSource, packOpts).then( data => { + // data is the file stream obtained after packing. You can write the file and save it to obtain an image. + }).catch(error => { + console.error('Failed to pack the image. And the error is: ' + error); + }) + ``` diff --git a/en/application-dev/media/image-overview.md b/en/application-dev/media/image-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..a88eb049166b845068a67eecec5a613435d124ab --- /dev/null +++ b/en/application-dev/media/image-overview.md @@ -0,0 +1,40 @@ +# Image Overview + +Image development is the process of parsing, processing, and constructing image pixel data to achieve the required image effect. Image development mainly involves image decoding, processing, and encoding. 
+ +Before image development, be familiar with the following basic concepts: + +- Image decoding + + The operation of decoding an archived image in a supported format into a pixel map for image display or processing. Currently, the following image formats are supported: JPEG, PNG, GIF, RAW, WebP, BMP, and SVG. + +- Pixel map + + A bitmap that is not compressed after being decoded. It is used for image display or processing. + +- Image processing + + A series of operations on the pixel map, such as rotation, scaling, opacity setting, image information obtaining, and pixel data reading and writing. + +- Image encoding + + The operation of encoding a pixel map into an archived image in different formats (only in JPEG and WebP currently) for subsequent processing, such as storage and transmission. + +The figure below illustrates the image development process. + +**Figure 1** Image development process +![Image development process](figures/image-development-process.png) + +1. Image retrieval: Obtain a raw image through the application sandbox. + +2. Instance creation: Create an **ImageSource** instance, which is the source class of decoded images and is used to obtain or modify image information. + +3. [Image decoding](image-decoding.md): Decode the image source to generate a pixel map. + +4. [Image processing](image-transformation.md): Process the pixel map by modifying the image attributes to implement image rotation, scaling, and cropping, and then use the [Image component](../ui/arkts-graphics-display.md) to display the image. + +5. [Image encoding](image-encoding.md): Use the **ImagePacker** class to compress and encode the pixel map or image source to generate a new image. + +In addition to the preceding basic image development capabilities, OpenHarmony provides the [image tool](image-tool.md) to ease your development. 
+ + \ No newline at end of file diff --git a/en/application-dev/media/image-pixelmap-operation.md b/en/application-dev/media/image-pixelmap-operation.md new file mode 100644 index 0000000000000000000000000000000000000000..d9b17b2c4dc5e5911e921d19a46d1b3066af5100 --- /dev/null +++ b/en/application-dev/media/image-pixelmap-operation.md @@ -0,0 +1,60 @@ +# Pixel Map Operation + +To process a certain area in an image, you can perform pixel map operations, which are usually used to beautify the image. + +As shown in the figure below, the pixel data of a rectangle in an image is read, modified, and then written back to the corresponding area of the original image. + +**Figure 1** Pixel map operation +![Pixel map operation](figures/bitmap-operation.png) + +## How to Develop + +Read [Image](../reference/apis/js-apis-image.md#pixelmap7) for APIs related to pixel map operations. + +1. Complete [image decoding](image-decoding.md#how-to-develop) and obtain a **PixelMap** object. + +2. Obtain information from the **PixelMap** object. + + ```ts + // Obtain the total number of bytes of this pixel map. + let pixelBytesNumber = pixelMap.getPixelBytesNumber(); + // Obtain the number of bytes per row of this pixel map. + let rowCount = pixelMap.getBytesNumberPerRow(); + // Obtain the pixel density of the image. Pixel density refers to the number of pixels per inch of an image. A larger value of the pixel density indicates a finer image. + let getDensity = pixelMap.getDensity(); + ``` + +3. Read and modify the pixel data of the target area, and write the modified data back to the original image. + + ```ts + // Scenario 1: Read the pixel data of the entire image and write the modified data to an array buffer. + const readBuffer = new ArrayBuffer(pixelBytesNumber); + pixelMap.readPixelsToBuffer(readBuffer).then(() => { + console.info('Succeeded in reading image pixel data.'); + }).catch(error => { + console.error('Failed to read image pixel data. 
And the error is: ' + error); + }) + + // Scenario 2: Read the pixel data in a specified area and write the modified data to area.pixels. + const area = { + pixels: new ArrayBuffer(8), + offset: 0, + stride: 8, + region: { size: { height: 1, width: 2 }, x: 0, y: 0 } + } + pixelMap.readPixels(area).then(() => { + console.info('Succeeded in reading the image data in the area.'); + }).catch(error => { + console.error('Failed to read the image data in the area. And the error is: ' + error); + }) + + // The read image data can be used independently (by creating a pixel map) or modified as required. + // Write area.pixels to the specified area. + pixelMap.writePixels(area).then(() => { + console.info('Succeeded to write pixelMap into the specified area.'); + }) + + // Write the image data result to a pixel map. + const writeColor = new ArrayBuffer(96); + pixelMap.writeBufferToPixels(writeColor, () => {}); + ``` diff --git a/en/application-dev/media/image-tool.md b/en/application-dev/media/image-tool.md new file mode 100644 index 0000000000000000000000000000000000000000..16748ff0b56557005793cdbe2798477995412cdf --- /dev/null +++ b/en/application-dev/media/image-tool.md @@ -0,0 +1,43 @@ +# Image Tool + +The image tool provides the capabilities of reading and editing Exchangeable Image File Format (EXIF) data of an image. + +EXIF is a file format dedicated for photos taken by digital cameras and is used to record attributes and shooting data of the photos. Currently, the image tool supports images in JPEG format only. + +Users may need to view or modify the EXIF data of photos in the Gallery application, for example, when the manual lens parameters of the camera are not automatically written as part of the EXIF data or the shooting time is incorrect due to camera power-off. + +Currently, OpenHarmony allows you to view and modify part of EXIF data. For details, see [EXIF](../reference/apis/js-apis-image.md#propertykey7). 
+ +## How to Develop + +Read [Image](../reference/apis/js-apis-image.md#getimageproperty7) for APIs used to read and edit EXIF data. + +1. Obtain the image and create an **ImageSource** object. + + ```ts + // Import the required module. + import image from '@ohos.multimedia.image'; + + // Obtain the sandbox path and create an ImageSource object. + const fd =...; //Obtain the file descriptor of the image to be processed. + const imageSource = image.createImageSource(fd); + ``` + +2. Read and edit EXIF data. + + ```ts + // Read the EXIF data, where BitsPerSample indicates the number of bits per pixel. + imageSource.getImageProperty('BitsPerSample', (error, data) => { + if (error) { + console.error('Failed to get the value of the specified attribute key of the image.And the error is: ' + error); + } else { + console.info('Succeeded in getting the value of the specified attribute key of the image ' + data); + } + }) + + // Edit the EXIF data. + imageSource.modifyImageProperty('ImageWidth', '120').then(() => { + const width = imageSource.getImageProperty("ImageWidth"); + console.info('The new imageWidth is ' + width); + }) + ``` diff --git a/en/application-dev/media/image-transformation.md b/en/application-dev/media/image-transformation.md new file mode 100644 index 0000000000000000000000000000000000000000..8965d409dda0fa9271feebb34b3b936c4b624bc6 --- /dev/null +++ b/en/application-dev/media/image-transformation.md @@ -0,0 +1,93 @@ +# Image Transformation + +Image processing refers to a series of operations performed on the pixel map, such as obtaining image information, cropping, scaling, translating, rotating, flipping, setting opacity, and reading and writing pixel data. These operations can be classified into image transformation and [pixel map operation](image-pixelmap-operation.md). This topic describes the image transformation operations that you can perform. 
+ +## How to Develop + +Read [Image](../reference/apis/js-apis-image.md#pixelmap7) for APIs related to image transformation. + +1. Complete [image decoding](image-decoding.md#how-to-develop) and obtain a **PixelMap** object. + +2. Obtain image information. + + ``` + // Obtain the image size. + pixelMap.getImageInfo().then( info => { + console.info('info.width = ' + info.size.width); + console.info('info.height = ' + info.size.height); + }).catch((err) => { + console.error("Failed to obtain the image pixel map information.And the error is: " + err); + }); + ``` + +3. Perform image transformation. + + Original image: + + ![Original drawing](figures/original-drawing.jpeg) + - Crop the image. + + ``` + // x: x-axis coordinate of the start point for cropping (0). + // y: y-axis coordinate of the start point for cropping (0). + // height: height after cropping (400), cropping from top to bottom. + // width: width after cropping (400), cropping from left to right. + pixelMap.crop({x: 0, y: 0, size: { height: 400, width: 400 } }); + ``` + + ![cropping](figures/cropping.jpeg) + + - Scale the image. + + ``` + // The width of the image after scaling is 0.5 of the original width. + // The height of the image after scaling is 0.5 of the original height. + pixelMap.scale(0.5, 0.5); + ``` + + ![zoom](figures/zoom.jpeg) + + - Translate the image. + + ``` + // Translate the image by 100 units downwards. + // Translate the image by 100 units to the right. + pixelMap.translate(100, 100); + ``` + + ![offsets](figures/offsets.jpeg) + + - Rotate the image. + + ``` + // Rotate the image clockwise by 90°. + pixelMap.rotate(90); + ``` + + ![rotate](figures/rotate.jpeg) + + - Flip the image. + + ``` + // Flip the image vertically. + pixelMap.flip(false, true); + ``` + + ![Vertical Flip](figures/vertical-flip.jpeg) + + + ``` + // Flip the image horizontally. + pixelMap.flip(true, false); + ``` + + ![Horizontal Flip](figures/horizontal-flip.jpeg) + + - Set the opacity of the image. 
+ + ``` + // Set the opacity to 0.5. + pixelMap.opacity(0.5); + ``` + + ![Transparency](figures/transparency.png) diff --git a/en/application-dev/media/image.md b/en/application-dev/media/image.md deleted file mode 100644 index fb4e648b56839ef76cb0e5277443605734d7ab6f..0000000000000000000000000000000000000000 --- a/en/application-dev/media/image.md +++ /dev/null @@ -1,283 +0,0 @@ -# Image Development - -## When to Use - -You can use image development APIs to decode images into pixel maps and encode the pixel maps into a supported format. - -## Available APIs - -For details about the APIs, see [Image Processing](../reference/apis/js-apis-image.md). - -## How to Develop - -### Full-Process Scenario - -The full process includes creating an instance, reading image information, reading and writing pixel maps, updating data, packaging pixels, and releasing resources. - -```js -const color = new ArrayBuffer(96); // Create a buffer to store image pixel data. -let opts = { alphaType: 0, editable: true, pixelFormat: 4, scaleMode: 1, size: { height: 2, width: 3 } } // Image pixel data. - -// Create a PixelMap object. -image.createPixelMap(color, opts, (err, pixelmap) => { - console.log('Succeeded in creating pixelmap.'); - // Failed to create the PixelMap object. - if (err) { - console.info('create pixelmap failed, err' + err); - return - } - - // Read pixels. - const area = { - pixels: new ArrayBuffer(8), - offset: 0, - stride: 8, - region: { size: { height: 1, width: 2 }, x: 0, y: 0 } - } - pixelmap.readPixels(area,() => { - let bufferArr = new Uint8Array(area.pixels); - let res = true; - for (let i = 0; i < bufferArr.length; i++) { - console.info(' buffer ' + bufferArr[i]); - if(res) { - if(bufferArr[i] == 0) { - res = false; - console.log('readPixels end.'); - break; - } - } - } - }) - - // Store pixels. 
- const readBuffer = new ArrayBuffer(96); - pixelmap.readPixelsToBuffer(readBuffer,() => { - let bufferArr = new Uint8Array(readBuffer); - let res = true; - for (let i = 0; i < bufferArr.length; i++) { - if(res) { - if (bufferArr[i] !== 0) { - res = false; - console.log('readPixelsToBuffer end.'); - break; - } - } - } - }) - - // Write pixels. - pixelmap.writePixels(area,() => { - const readArea = { pixels: new ArrayBuffer(20), offset: 0, stride: 8, region: { size: { height: 1, width: 2 }, x: 0, y: 0 }} - pixelmap.readPixels(readArea,() => { - let readArr = new Uint8Array(readArea.pixels); - let res = true; - for (let i = 0; i < readArr.length; i++) { - if(res) { - if (readArr[i] !== 0) { - res = false; - console.log('readPixels end.please check buffer'); - break; - } - } - } - }) - }) - - const writeColor = new ArrayBuffer(96); // Pixel data of the image. - // Write pixels to the buffer. - pixelmap.writeBufferToPixels(writeColor).then(() => { - const readBuffer = new ArrayBuffer(96); - pixelmap.readPixelsToBuffer(readBuffer).then (() => { - let bufferArr = new Uint8Array(readBuffer); - let res = true; - for (let i = 0; i < bufferArr.length; i++) { - if(res) { - if (bufferArr[i] !== i) { - res = false; - console.log('readPixels end.please check buffer'); - break; - } - } - } - }) - }) - - // Obtain image information. - pixelmap.getImageInfo((err, imageInfo) => { - // Failed to obtain the image information. - if (err || imageInfo == null) { - console.info('getImageInfo failed, err' + err); - return - } - if (imageInfo !== null) { - console.log('Succeeded in getting imageInfo'); - } - }) - - // Release the PixelMap object. - pixelmap.release(()=>{ - console.log('Succeeded in releasing pixelmap'); - }) -}) - -// Create an image source (uri). -let path = '/data/local/tmp/test.jpg'; -const imageSourceApi1 = image.createImageSource(path); - -// Create an image source (fd). 
-let fd = 29; -const imageSourceApi2 = image.createImageSource(fd); - -// Create an image source (data). -const data = new ArrayBuffer(96); -const imageSourceApi3 = image.createImageSource(data); - -// Release the image source. -imageSourceApi3.release(() => { - console.log('Succeeded in releasing imagesource'); -}) - -// Encode the image. -const imagePackerApi = image.createImagePacker(); -const imageSourceApi = image.createImageSource(0); -let packOpts = { format:"image/jpeg", quality:98 }; -imagePackerApi.packing(imageSourceApi, packOpts, (err, data) => { - if (err) { - console.info('packing from imagePackerApi failed, err' + err); - return - } - console.log('Succeeded in packing'); -}) - -// Release the ImagePacker object. -imagePackerApi.release(); -``` - -### Decoding Scenario - -```js -let path = '/data/local/tmp/test.jpg'; // Set the path for creating an image source. - -// Create an image source using a path. -const imageSourceApi = image.createImageSource(path); // '/data/local/tmp/test.jpg' - -// Set parameters. -let decodingOptions = { - sampleSize:1, // Sampling size of the thumbnail. - editable: true, // Whether the image can be edited. - desiredSize:{ width:1, height:2}, // Desired output size of the image. - rotateDegrees:10, // Rotation angle of the image. - desiredPixelFormat:2, // Decoded pixel format. - desiredRegion: { size: { height: 1, width: 2 }, x: 0, y: 0 }, // Region of the image to decode. - index:0// Image sequence number. - }; - -// Create a pixel map in callback mode. -imageSourceApi.createPixelMap(decodingOptions, (err, pixelmap) => { - // Failed to create the PixelMap object. - if (err) { - console.info('create pixelmap failed, err' + err); - return - } - console.log('Succeeded in creating pixelmap.'); -}) - -// Create a pixel map in promise mode. -imageSourceApi.createPixelMap().then(pixelmap => { - console.log('Succeeded in creating pixelmap.'); - - // Obtain the number of bytes in each line of pixels. 
- let num = pixelmap.getBytesNumberPerRow(); - - // Obtain the total number of pixel bytes. - let pixelSize = pixelmap.getPixelBytesNumber(); - - // Obtain the pixel map information. - pixelmap.getImageInfo().then( imageInfo => {}); - - // Release the PixelMap object. - pixelmap.release(()=>{ - console.log('Succeeded in releasing pixelmap'); - }) -}).catch(error => { - console.log('Failed in creating pixelmap.' + error); -}) -``` - -### Encoding Scenario - -```js -let path = '/data/local/tmp/test.png' // Set the path for creating an image source. - -// Set the image source. -const imageSourceApi = image.createImageSource(path); // '/data/local/tmp/test.png' - -// Print the error message if the image source fails to be created. -if (imageSourceApi == null) { - console.log('Failed in creating imageSource.'); -} - -// Create an image packer if the image source is successfully created. -const imagePackerApi = image.createImagePacker(); - -// Print the error information if the image packer fails to be created. -if (imagePackerApi == null) { - console.log('Failed in creating imagePacker.'); -} - -// Set encoding parameters if the image packer is successfully created. -let packOpts = { format:"image/jpeg", // The supported encoding format is jpg. - quality:98 } // Image quality, which ranges from 0 to 100. - -// Encode the image. -imagePackerApi.packing(imageSourceApi, packOpts) -.then( data => { - console.log('Succeeded in packing'); -}) - -// Release the image packer after the encoding is complete. -imagePackerApi.release(); - -// Obtain the image source information. -imageSourceApi.getImageInfo((err, imageInfo) => { - console.log('Succeeded in getting imageInfo'); -}) - -const array = new ArrayBuffer(100); // Incremental data. -// Update incremental data. -imageSourceApi.updateData(array, false, 0, 10,(error, data)=> {}) - -``` - -### Using ImageReceiver - -Example scenario: The camera functions as the client to transmit image data to the server. 
- -```js -public async init(surfaceId: any) { - - // (Server code) Create an ImageReceiver object. - let receiver = image.createImageReceiver(8 * 1024, 8, image.ImageFormat.JPEG, 1); - - // Obtain the surface ID. - receiver.getReceivingSurfaceId((err, surfaceId) => { - // Failed to obtain the surface ID. - if (err) { - console.info('getReceivingSurfaceId failed, err' + err); - return - } - console.info("receiver getReceivingSurfaceId success"); - }); - // Register a surface listener, which is triggered after the buffer of the surface is ready. - receiver.on('imageArrival', () => { - // Obtain the latest buffer of the surface. - receiver.readNextImage((err, img) => { - img.getComponent(4, (err, component) => { - // Consume component.byteBuffer. For example, save the content in the buffer as an image. - }) - }) - }) - - // Call a Camera API to transfer the surface ID to the camera, which then obtains the surface based on the surface ID and generates a surface buffer. -} -``` diff --git a/en/application-dev/media/local-avsession-overview.md b/en/application-dev/media/local-avsession-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..2ced0a180e3bed3a1adea4e4b3ff196721bc23a8 --- /dev/null +++ b/en/application-dev/media/local-avsession-overview.md @@ -0,0 +1,63 @@ +# Local AVSession Overview + +## Interaction Process + +For a local AVSession, the data sources are on the local device. The figure below illustrates the interaction process. + +![Local AVSession Interaction Process](figures/local-avsession-interaction-process.png) + +This process involves two roles: provider and controller. + +In the local AVSession, the provider exchanges information with the controller through AVSessionManager. + +1. The provider creates an **AVSession** object through AVSessionManager. + +2. 
Through the **AVSession** object, the provider sets session metadata (such as the asset ID, title, and duration) and playback attributes (such as the playback state, speed, and position). + +3. The controller creates an **AVSessionController** object through AVSessionManager. + +4. Through the **AVSessionController** object, the controller listens for changes of the session metadata and playback attributes. + +5. Through the **AVSessionController** object, the controller sends control commands to the **AVSession** object. + +6. Through the **AVSession** object, the provider listens for the control commands, for example, play, playNext, fastForward, and setSpeed, from the controller. + +## AVSessionManager + +AVSessionManager provides the capability of managing sessions. It can create an **AVSession** object, create an **AVSessionController** object, send control commands, and listen for session state changes. + +Unlike the **AVSession** and **AVSessionController** objects, AVSessionManager is not a specific object, but the root namespace of AVSessions. You can import AVSessionManager as follows: + +```ts +import AVSessionManager from '@ohos.multimedia.avsession'; +``` + +All the APIs in the root namespace can be used as APIs of AVSessionManager. + +The code snippet below shows how the provider creates an **AVSession** object by using AVSessionManager: + +```ts +// Create an AVSession object. +async createSession() { + let session: AVSessionManager.AVSession = await AVSessionManager.createAVSession(this.context, 'SESSION_NAME', 'audio'); + console.info(`session create done : sessionId : ${session.sessionId}`); +} +``` + +The code snippet below shows how the controller creates an **AVSessionController** object by using AVSessionManager: + +```ts +// Create an AVSessionController object. +async createController() { + // Obtain the descriptors of all live AVSession objects. 
+ let descriptorsArray: Array<AVSessionManager.AVSessionDescriptor> = await AVSessionManager.getAllSessionDescriptors(); + if (descriptorsArray.length > 0) { + // For demonstration, the session ID of the first descriptor is used to create the AVSessionController object. + let sessionId: string = descriptorsArray[0].sessionId; + let avSessionController: AVSessionManager.AVSessionController = await AVSessionManager.createController(sessionId); + console.info(`controller create done : sessionId : ${avSessionController.sessionId}`); + } +} +``` + +For more information about AVSessionManager APIs, see [API Reference](../reference/apis/js-apis-avsession.md). diff --git a/en/application-dev/media/media-application-overview.md b/en/application-dev/media/media-application-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..d350482e61e7bc9659054b0426c10ce07da88045 --- /dev/null +++ b/en/application-dev/media/media-application-overview.md @@ -0,0 +1,19 @@ +# Media Application Development Overview + +## Multimedia Subsystem Architecture + +The multimedia subsystem provides the capability of processing users' visual and auditory information. For example, it can be used to collect, compress, store, decompress, and play audio and video information. Based on the type of media information to process, the media system is usually divided into four modules: audio, media, camera, and image. + +As shown in the figure below, the multimedia subsystem provides APIs for developing audio/video, camera, and gallery applications, and provides adaptation and acceleration for different hardware chips. In the middle part, it provides core media functionalities and management mechanisms in the form of services. + +**Figure 1** Overall framework of the multimedia subsystem + +![Multimedia subsystem framework](figures/media-system-framework.png) + +- Audio module: provides interfaces and services for volume management, audio route management, and audio mixing management. 
+ +- Media module: provides interfaces and services for audio and video decompression, playback, compression, and recording. + +- Camera module: provides interfaces and services for accurately controlling camera lenses and collecting visual information. + +- Image module: provides interfaces and services for image encoding, decoding, and processing. diff --git a/en/application-dev/media/mic-management.md b/en/application-dev/media/mic-management.md new file mode 100644 index 0000000000000000000000000000000000000000..952aeef3f3c607d3a2132eb6d1e0ab6bdd4490c9 --- /dev/null +++ b/en/application-dev/media/mic-management.md @@ -0,0 +1,114 @@ +# Microphone Management + +The microphone is used to record audio data. To deliver an optimal recording effect, you are advised to query the microphone state before starting recording and listen for state changes during recording. + +If the user mutes the microphone during audio recording, the recording process is normal, the size of the recorded file increases with the recording duration, but the data volume written into the file is 0. + +## How to Develop + +The **AudioVolumeGroupManager** class provides APIs for managing the microphone state. For details, see [API Reference](../reference/apis/js-apis-audio.md#audiovolumegroupmanager9). + +1. Create an **audioVolumeGroupManager** object. + + ```ts + import audio from '@ohos.multimedia.audio'; + + let audioVolumeGroupManager; + async function loadVolumeGroupManager() { // Create an audioVolumeGroupManager object. + const groupid = audio.DEFAULT_VOLUME_GROUP_ID; + audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid); + console.info('audioVolumeGroupManager create success.'); + } + ``` + +2. Call **on('micStateChange')** to listen for microphone state changes. When the microphone state changes, the application will be notified of the change. 
+ + Currently, when multiple **AudioManager** instances are used in a single process, only the subscription of the last instance takes effect, and the subscription of other instances is overwritten (even if the last instance does not initiate a subscription). Therefore, you are advised to use a single **AudioManager** instance. + + + ```ts + async function on() { // Subscribe to microphone state changes. + audioVolumeGroupManager.on('micStateChange', (micStateChange) => { + console.info(`Current microphone status is: ${micStateChange.mute} `); + }); + } + ``` + +3. Call **isMicrophoneMute** to check whether the microphone is muted. If the returned value is **true**, the microphone is muted; otherwise, the microphone is not muted. + + ```ts + async function isMicrophoneMute() { // Check whether the microphone is muted. + await audioVolumeGroupManager.isMicrophoneMute().then((value) => { + console.info(`isMicrophoneMute is: ${value}.`); + }); + } + ``` + +4. Call **setMicrophoneMute** to mute or unmute the microphone. To mute the microphone, pass in **true**. To unmute the microphone, pass in **false**. + + ```ts + async function setMicrophoneMuteTrue() { // Pass in true to mute the microphone. + await audioVolumeGroupManager.setMicrophoneMute(true).then(() => { + console.info('setMicrophoneMute to mute.'); + }); + } + async function setMicrophoneMuteFalse() { // Pass in false to unmute the microphone. + await audioVolumeGroupManager.setMicrophoneMute(false).then(() => { + console.info('setMicrophoneMute to not mute.'); + }); + } + ``` + +## Sample Code + +Refer to the sample code below to complete the process of muting and unmuting the microphone. 
+ +```ts +import audio from '@ohos.multimedia.audio'; + +@Entry +@Component +struct AudioVolumeGroup { + private audioVolumeGroupManager: audio.AudioVolumeGroupManager; + + async loadVolumeGroupManager() { + const groupid = audio.DEFAULT_VOLUME_GROUP_ID; + this.audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid); + console.info('audioVolumeGroupManager------create-------success.'); + } + + async on() { // Subscribe to microphone state changes. + await this.loadVolumeGroupManager(); + this.audioVolumeGroupManager.on('micStateChange', (micStateChange) => { + console.info(`Current microphone status is: ${micStateChange.mute} `); + }); + } + async isMicrophoneMute() { // Check whether the microphone is muted. + await this.audioVolumeGroupManager.isMicrophoneMute().then((value) => { + console.info(`isMicrophoneMute is: ${value}.`); + }); + } + async setMicrophoneMuteTrue() { // Mute the microphone. + await this.loadVolumeGroupManager(); + await this.audioVolumeGroupManager.setMicrophoneMute(true).then(() => { + console.info('setMicrophoneMute to mute.'); + }); + } + async setMicrophoneMuteFalse() { // Unmute the microphone. 
+ await this.loadVolumeGroupManager(); + await this.audioVolumeGroupManager.setMicrophoneMute(false).then(() => { + console.info('setMicrophoneMute to not mute.'); + }); + } + async test(){ + await this.on(); + await this.isMicrophoneMute(); + await this.setMicrophoneMuteTrue(); + await this.isMicrophoneMute(); + await this.setMicrophoneMuteFalse(); + await this.isMicrophoneMute(); + await this.setMicrophoneMuteTrue(); + await this.isMicrophoneMute(); + } +} +``` diff --git a/en/application-dev/media/opensles-capture.md b/en/application-dev/media/opensles-capture.md deleted file mode 100644 index 3c33b37076ac14d98b550ba7b1a7e36bfe1cb048..0000000000000000000000000000000000000000 --- a/en/application-dev/media/opensles-capture.md +++ /dev/null @@ -1,151 +0,0 @@ -# OpenSL ES Audio Recording Development - -## Introduction - -You can use OpenSL ES to develop the audio recording function in OpenHarmony. Currently, only some [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) are implemented. If an API that has not been implemented is called, **SL_RESULT_FEATURE_UNSUPPORTED** will be returned. - -## How to Develop - -To use OpenSL ES to develop the audio recording function in OpenHarmony, perform the following steps: - -1. Add the header files. - - ```c++ - #include - #include - #include - ``` - -2. Use the **slCreateEngine** API to create and instantiate the **engine** instance. - - ```c++ - SLObjectItf engineObject = nullptr; - slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr); - (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE); - ``` - -3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** interface. - - ```c++ - SLEngineItf engineItf = nullptr; - result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineItf); - ``` - -4. 
Configure the recorder information (including the input source **audiosource** and output source **audiosink**), and create a **pcmCapturerObject** instance. - - ```c++ - SLDataLocator_IODevice io_device = { - SL_DATALOCATOR_IODEVICE, - SL_IODEVICE_AUDIOINPUT, - SL_DEFAULTDEVICEID_AUDIOINPUT, - NULL - }; - - SLDataSource audioSource = { - &io_device, - NULL - }; - - SLDataLocator_BufferQueue buffer_queue = { - SL_DATALOCATOR_BUFFERQUEUE, - 3 - }; - - // Configure the parameters based on the audio file format. - SLDataFormat_PCM format_pcm = { - SL_DATAFORMAT_PCM, // Input audio format. - 1, // Mono channel. - SL_SAMPLINGRATE_44_1, // Sampling rate, 44100 Hz. - SL_PCMSAMPLEFORMAT_FIXED_16, // Audio sampling format, a signed 16-bit integer in little-endian format. - 0, - 0, - 0 - }; - - SLDataSink audioSink = { - &buffer_queue, - &format_pcm - }; - - SLObjectItf pcmCapturerObject = nullptr; - result = (*engineItf)->CreateAudioRecorder(engineItf, &pcmCapturerObject, - &audioSource, &audioSink, 0, nullptr, nullptr); - (*pcmCapturerObject)->Realize(pcmCapturerObject, SL_BOOLEAN_FALSE); - ``` - -5. Obtain the **recordItf** instance of the **SL_IID_RECORD** interface. - - ```c++ - SLRecordItf recordItf; - (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_RECORD, &recordItf); - ``` - -6. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** interface. - - ```c++ - SLOHBufferQueueItf bufferQueueItf; - (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf); - ``` - -7. Register the **BufferQueueCallback** function. 
- - ```c++ - static void BufferQueueCallback(SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size) - { - AUDIO_INFO_LOG("BufferQueueCallback"); - FILE *wavFile = (FILE *)pContext; - if (wavFile != nullptr) { - SLuint8 *buffer = nullptr; - SLuint32 pSize = 0; - (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize); - if (buffer != nullptr) { - fwrite(buffer, 1, pSize, wavFile); - (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size); - } - } - - return; - } - - // Set wavFile_ to the descriptor of the file to be recorded. - (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, wavFile_); - ``` - -8. Start audio recording. - - ```c++ - static void CaptureStart(SLRecordItf recordItf, SLOHBufferQueueItf bufferQueueItf, FILE *wavFile) - { - AUDIO_INFO_LOG("CaptureStart"); - (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_RECORDING); - if (wavFile != nullptr) { - SLuint8* buffer = nullptr; - SLuint32 pSize = 0; - (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize); - if (buffer != nullptr) { - AUDIO_INFO_LOG("CaptureStart, enqueue buffer length: %{public}lu.", pSize); - fwrite(buffer, 1, pSize, wavFile); - (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, pSize); - } else { - AUDIO_INFO_LOG("CaptureStart, buffer is null or pSize: %{public}lu.", pSize); - } - } - - return; - } - ``` - -9. Stop audio recording. 
- - ```c++ - static void CaptureStop(SLRecordItf recordItf) - { - AUDIO_INFO_LOG("Enter CaptureStop"); - fflush(wavFile_); - (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_STOPPED); - (*pcmCapturerObject)->Destroy(pcmCapturerObject); - fclose(wavFile_); - wavFile_ = nullptr; - return; - } - ``` diff --git a/en/application-dev/media/opensles-playback.md b/en/application-dev/media/opensles-playback.md deleted file mode 100644 index fe89bc9553da3163e1e18ca43922ff99e13c1307..0000000000000000000000000000000000000000 --- a/en/application-dev/media/opensles-playback.md +++ /dev/null @@ -1,104 +0,0 @@ -# OpenSL ES Audio Playback Development - -## Introduction - -You can use OpenSL ES to develop the audio playback function in OpenHarmony. Currently, only some [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) are implemented. If an API that has not been implemented is called, **SL_RESULT_FEATURE_UNSUPPORTED** will be returned. - -## How to Develop - -To use OpenSL ES to develop the audio playback function in OpenHarmony, perform the following steps: - -1. Add the header files. - - ```c++ - #include - #include - #include - ``` - -2. Use the **slCreateEngine** API to obtain an **engine** instance. - - ```c++ - SLObjectItf engineObject = nullptr; - slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr); - (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE); - ``` - -3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** interface. - - ```c++ - SLEngineItf engineEngine = nullptr; - (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine); - ``` - -4. Configure the player and create an **AudioPlayer** instance. - - ```c++ - SLDataLocator_BufferQueue slBufferQueue = { - SL_DATALOCATOR_BUFFERQUEUE, - 0 - }; - - // Configure the parameters based on the audio file format. 
- SLDataFormat_PCM pcmFormat = { - SL_DATAFORMAT_PCM, - 2, - 48000, - 16, - 0, - 0, - 0 - }; - SLDataSource slSource = {&slBufferQueue, &pcmFormat}; - - SLObjectItf pcmPlayerObject = nullptr; - (*engineEngine)->CreateAudioPlayer(engineEngine, &pcmPlayerObject, &slSource, null, 0, nullptr, nullptr); - (*pcmPlayerObject)->Realize(pcmPlayerObject, SL_BOOLEAN_FALSE); - ``` - -5. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** interface. - - ```c++ - SLOHBufferQueueItf bufferQueueItf; - (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf); - ``` - -6. Open an audio file and register the **BufferQueueCallback** function. - - ```c++ - FILE *wavFile_ = nullptr; - - static void BufferQueueCallback (SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size) - { - FILE *wavFile = (FILE *)pContext; - if (!feof(wavFile)) { - SLuint8 *buffer = nullptr; - SLuint32 pSize = 0; - (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize); - // Read data from the file. - fread(buffer, 1, size, wavFile); - (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size); - } - return; - } - - // Set wavFile_ to the descriptor of the file to be played. - wavFile_ = fopen(path, "rb"); - (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, wavFile_); - ``` - -7. Obtain the **playItf** instance of the **SL_PLAYSTATE_PLAYING** interface and start playback. - - ```c++ - SLPlayItf playItf = nullptr; - (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_PLAY, &playItf); - (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING); - ``` - -8. Stop audio playback. 
- - ```c++ - (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED); - (*pcmPlayerObject)->Destroy(pcmPlayerObject); - (*engineObject)->Destroy(engineObject); - ``` diff --git a/en/application-dev/media/remote-camera.md b/en/application-dev/media/remote-camera.md deleted file mode 100644 index d7bf710279c1504cd9703eca9af7cf5433cb3dac..0000000000000000000000000000000000000000 --- a/en/application-dev/media/remote-camera.md +++ /dev/null @@ -1,65 +0,0 @@ -# Distributed Camera Development - -## When to Use - -You can call the APIs provided by the **Camera** module to develop a distributed camera that provides the basic camera functions such as shooting and video recording. - -## How to Develop -Connect your calculator to a distributed device. Your calculator will call **getSupportedCameras()** to obtain the camera list and traverse the returned camera list to check **ConnectionType** of the **Camera** objects. If **ConnectionType** of a **Camera** object is **CAMERA_CONNECTION_REMOTE**, your calculator will use this object to create a **cameraInput** object. The subsequent call process is the same as that of the local camera development. For details about the local camera development, see [Camera Development](./camera.md). - -For details about the APIs, see [Camera Management](../reference/apis/js-apis-camera.md). - -### Connecting to a Distributed Camera - -Connect the calculator and the distributed device to the same LAN. - -Open the calculator and click the arrow icon in the upper right corner. A new window is displayed. Enter the verification code as prompted, and the calculator will be connected to the distributed device. - -### Creating an Instance - -```js -import camera from '@ohos.multimedia.camera' -import image from '@ohos.multimedia.image' -import media from '@ohos.multimedia.media' -import featureAbility from '@ohos.ability.featureAbility' - -// Create a CameraManager object. 
-let cameraManager = camera.getCameraManager(globalThis.Context) -if (!cameraManager) { - console.error("camera.getCameraManager error") - return; -} - -// Register a callback to listen for camera status changes and obtain the updated camera status information. -cameraManager.on('cameraStatus', (cameraStatusInfo) => { - console.log('camera : ' + cameraStatusInfo.camera.cameraId); - console.log('status: ' + cameraStatusInfo.status); -}) - -// Obtain the camera list. -let remoteCamera -let cameraArray = cameraManager.getSupportedCameras(); -if (cameraArray.length <= 0) { - console.error("cameraManager.getSupportedCameras error") - return; -} - -for(let cameraIndex = 0; cameraIndex < cameraArray.length; cameraIndex++) { - console.log('cameraId : ' + cameraArray[cameraIndex].cameraId) // Obtain the camera ID. - console.log('cameraPosition : ' + cameraArray[cameraIndex].cameraPosition) // Obtain the camera position. - console.log('cameraType : ' + cameraArray[cameraIndex].cameraType) // Obtain the camera type. - console.log('connectionType : ' + cameraArray[cameraIndex].connectionType) // Obtain the camera connection type. - if (cameraArray[cameraIndex].connectionType == CAMERA_CONNECTION_REMOTE) { - remoteCamera = cameraArray[cameraIndex] - } -} - -// Create a camera input stream. -let cameraInput -try { - cameraInput = cameraManager.createCameraInput(remoteCamera); -} catch () { - console.error('Failed to createCameraInput errorCode = ' + error.code); -} -``` -For details about the subsequent steps, see [Camera Development](./camera.md). 
diff --git a/en/application-dev/media/using-audiocapturer-for-recording.md b/en/application-dev/media/using-audiocapturer-for-recording.md new file mode 100644 index 0000000000000000000000000000000000000000..87d13fa3f749cb18ba1c9d61843b750a36a1bcad --- /dev/null +++ b/en/application-dev/media/using-audiocapturer-for-recording.md @@ -0,0 +1,211 @@ +# Using AudioCapturer for Audio Recording + +The AudioCapturer is used to record Pulse Code Modulation (PCM) audio data. It is suitable if you have extensive audio development experience and want to implement more flexible recording features. + +## Development Guidelines + +The full recording process involves creating an **AudioCapturer** instance, configuring audio recording parameters, starting and stopping recording, and releasing the instance. In this topic, you will learn how to use the AudioCapturer to record audio data. Before the development, you are advised to read [AudioCapturer](../reference/apis/js-apis-audio.md#audiocapturer8) for the API reference. + +The figure below shows the state changes of the AudioCapturer. After an **AudioCapturer** instance is created, different APIs can be called to switch the AudioCapturer to different states and trigger the required behavior. If an API is called when the AudioCapturer is not in the given state, the system may throw an exception or generate other undefined behavior. Therefore, you are advised to check the AudioCapturer state before triggering state transition. + +**Figure 1** AudioCapturer state transition +![AudioCapturer state change](figures/audiocapturer-status-change.png) + +You can call **on('stateChange')** to listen for state changes. For details about each state, see [AudioState](../reference/apis/js-apis-audio.md#audiostate8). + +### How to Develop + +1. Set audio recording parameters and create an **AudioCapturer** instance. For details about the parameters, see [AudioCapturerOptions](../reference/apis/js-apis-audio.md#audiocaptureroptions8). 
+ + ```ts + import audio from '@ohos.multimedia.audio'; + + let audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, + channels: audio.AudioChannel.CHANNEL_2, + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW + }; + + let audioCapturerInfo = { + source: audio.SourceType.SOURCE_TYPE_MIC, + capturerFlags: 0 + }; + + let audioCapturerOptions = { + streamInfo: audioStreamInfo, + capturerInfo: audioCapturerInfo + }; + + audio.createAudioCapturer(audioCapturerOptions, (err, data) => { + if (err) { + console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Invoke createAudioCapturer succeeded.'); + let audioCapturer = data; + } + }); + ``` + +2. Call **start()** to switch the AudioCapturer to the **running** state and start recording. + + ```ts + audioCapturer.start((err) => { + if (err) { + console.error(`Capturer start failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Capturer start success.'); + } + }); + ``` + +3. Specify the recording file path and call **read()** to read the data in the buffer. + + ```ts + let file = fs.openSync(path, 0o2 | 0o100); + let bufferSize = await audioCapturer.getBufferSize(); + let buffer = await audioCapturer.read(bufferSize, true); + fs.writeSync(file.fd, buffer); + ``` + +4. Call **stop()** to stop recording. + + ```ts + audioCapturer.stop((err) => { + if (err) { + console.error(`Capturer stop failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Capturer stopped.'); + } + }); + ``` + +5. Call **release()** to release the instance. 
+ + ```ts + audioCapturer.release((err) => { + if (err) { + console.error(`capturer release failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('capturer released.'); + } + }); + ``` + + +### Sample Code + +Refer to the sample code below to record audio using AudioCapturer. + +```ts +import audio from '@ohos.multimedia.audio'; +import fs from '@ohos.file.fs'; + +const TAG = 'AudioCapturerDemo'; + +export default class AudioCapturerDemo { + private audioCapturer = undefined; + private audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, + channels: audio.AudioChannel.CHANNEL_1, + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW + } + private audioCapturerInfo = { + source: audio.SourceType.SOURCE_TYPE_MIC, // Audio source type. + capturerFlags: 0 // Flag indicating an AudioCapturer. + } + private audioCapturerOptions = { + streamInfo: this.audioStreamInfo, + capturerInfo: this.audioCapturerInfo + } + + // Create an AudioCapturer instance, and set the events to listen for. + init() { + audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // Create an AudioCapturer instance. + if (err) { + console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`); + return; + } + + console.info(`${TAG}: create AudioCapturer success`); + this.audioCapturer = capturer; + this.audioCapturer.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of captured frames reaches 1000. + if (position === 1000) { + console.info('ON Triggered successfully'); + } + }); + this.audioCapturer.on('periodReach', 2000, (position) => { // Subscribe to the periodReach event. A callback is triggered when the number of captured frames reaches 2000. 
+ if (position === 2000) { + console.info('ON Triggered successfully'); + } + }); + + }); + } + + // Start audio recording. + async start() { + let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED]; + if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // Recording can be started only when the AudioCapturer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state. + console.error(`${TAG}: start failed`); + return; + } + await this.audioCapturer.start(); // Start recording. + + let context = getContext(this); + const path = context.filesDir + '/test.wav'; // Path for storing the recorded audio file. + + let file = fs.openSync(path, 0o2 | 0o100); // Create the file if it does not exist. + let fd = file.fd; + let numBuffersToCapture = 150; // Write data for 150 times. + let count = 0; + while (numBuffersToCapture) { + let bufferSize = await this.audioCapturer.getBufferSize(); + let buffer = await this.audioCapturer.read(bufferSize, true); + let options = { + offset: count * bufferSize, + length: bufferSize + }; + if (buffer === undefined) { + console.error(`${TAG}: read buffer failed`); + } else { + let number = fs.writeSync(fd, buffer, options); + console.info(`${TAG}: write date: ${number}`); + } + numBuffersToCapture--; + count++; + } + } + + // Stop recording. + async stop() { + // The AudioCapturer can be stopped only when it is in the STATE_RUNNING or STATE_PAUSED state. + if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) { + console.info('Capturer is not running or paused'); + return; + } + await this.audioCapturer.stop(); // Stop recording. + if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) { + console.info('Capturer stopped'); + } else { + console.error('Capturer stop failed'); + } + } + + // Release the instance. 
+ async release() { + // The AudioCapturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state. + if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) { + console.info('Capturer already released'); + return; + } + await this.audioCapturer.release(); // Release the instance. + if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) { + console.info('Capturer released'); + } else { + console.error('Capturer release failed'); + } + } +} +``` diff --git a/en/application-dev/media/using-audiorenderer-for-playback.md b/en/application-dev/media/using-audiorenderer-for-playback.md new file mode 100644 index 0000000000000000000000000000000000000000..11934e669813fa7a89ceef43bd2c3795db6bad75 --- /dev/null +++ b/en/application-dev/media/using-audiorenderer-for-playback.md @@ -0,0 +1,268 @@ +# Using AudioRenderer for Audio Playback + +The AudioRenderer is used to play Pulse Code Modulation (PCM) audio data. Unlike the AVPlayer, the AudioRenderer can perform data preprocessing before audio input. Therefore, the AudioRenderer is more suitable if you have extensive audio development experience and want to implement more flexible playback features. + +## Development Guidelines + +The full rendering process involves creating an **AudioRenderer** instance, configuring audio rendering parameters, starting and stopping rendering, and releasing the instance. In this topic, you will learn how to use the AudioRenderer to render audio data. Before the development, you are advised to read [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8) for the API reference. + +The figure below shows the state changes of the AudioRenderer. After an **AudioRenderer** instance is created, different APIs can be called to switch the AudioRenderer to different states and trigger the required behavior. 
If an API is called when the AudioRenderer is not in the given state, the system may throw an exception or generate other undefined behavior. Therefore, you are advised to check the AudioRenderer state before triggering state transition. + +To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the callback functions. + +**Figure 1** AudioRenderer state transition + +![AudioRenderer state transition](figures/audiorenderer-status-change.png) + +During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the AudioRenderer. This is because some operations can be performed only when the AudioRenderer is in a given state. If the application performs an operation when the AudioRenderer is not in the given state, the system may throw an exception or generate other undefined behavior. + +- **prepared**: The AudioRenderer enters this state by calling **createAudioRenderer()**. + +- **running**: The AudioRenderer enters this state by calling **start()** when it is in the **prepared**, **paused**, or **stopped** state. + +- **paused**: The AudioRenderer enters this state by calling **pause()** when it is in the **running** state. When the audio playback is paused, it can call **start()** to resume the playback. + +- **stopped**: The AudioRenderer enters this state by calling **stop()** when it is in the **paused** or **running** state + +- **released**: The AudioRenderer enters this state by calling **release()** when it is in the **prepared**, **paused**, or **stopped** state. In this state, the AudioRenderer releases all occupied hardware and software resources and will not transit to any other state. + +### How to Develop + +1. Set audio rendering parameters and create an **AudioRenderer** instance. 
For details about the parameters, see [AudioRendererOptions](../reference/apis/js-apis-audio.md#audiorendereroptions8). + + ```ts + import audio from '@ohos.multimedia.audio'; + + let audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, + channels: audio.AudioChannel.CHANNEL_1, + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW + }; + + let audioRendererInfo = { + content: audio.ContentType.CONTENT_TYPE_SPEECH, + usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, + rendererFlags: 0 + }; + + let audioRendererOptions = { + streamInfo: audioStreamInfo, + rendererInfo: audioRendererInfo + }; + + audio.createAudioRenderer(audioRendererOptions, (err, data) => { + if (err) { + console.error(`Invoke createAudioRenderer failed, code is ${err.code}, message is ${err.message}`); + return; + } else { + console.info('Invoke createAudioRenderer succeeded.'); + let audioRenderer = data; + } + }); + ``` + +2. Call **start()** to switch the AudioRenderer to the **running** state and start rendering. + + ```ts + audioRenderer.start((err) => { + if (err) { + console.error(`Renderer start failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Renderer start success.'); + } + }); + ``` + +3. Specify the address of the file to render. Open the file and call **write()** to continuously write audio data to the buffer for rendering and playing. To implement personalized playback, process the audio data before writing it. + + ```ts + const bufferSize = await audioRenderer.getBufferSize(); + let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); + let buf = new ArrayBuffer(bufferSize); + let readsize = await fs.read(file.fd, buf); + let writeSize = await new Promise((resolve, reject) => { + audioRenderer.write(buf, (err, writeSize) => { + if (err) { + reject(err); + } else { + resolve(writeSize); + } + }); + }); + ``` + +4. Call **stop()** to stop rendering. 
+ + ```ts + audioRenderer.stop((err) => { + if (err) { + console.error(`Renderer stop failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Renderer stopped.'); + } + }); + ``` + +5. Call **release()** to release the instance. + + ```ts + audioRenderer.release((err) => { + if (err) { + console.error(`Renderer release failed, code is ${err.code}, message is ${err.message}`); + } else { + console.info('Renderer released.'); + } + }); + ``` + +### Sample Code + +Refer to the sample code below to render an audio file using AudioRenderer. + +```ts +import audio from '@ohos.multimedia.audio'; +import fs from '@ohos.file.fs'; + +const TAG = 'AudioRendererDemo'; + +export default class AudioRendererDemo { + private renderModel = undefined; + private audioStreamInfo = { + samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate. + channels: audio.AudioChannel.CHANNEL_2, // Channel. + sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format. + encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format. + } + private audioRendererInfo = { + content: audio.ContentType.CONTENT_TYPE_MUSIC, // Media type. + usage: audio.StreamUsage.STREAM_USAGE_MEDIA, // Audio stream usage type. + rendererFlags: 0 // AudioRenderer flag. + } + private audioRendererOptions = { + streamInfo: this.audioStreamInfo, + rendererInfo: this.audioRendererInfo + } + + // Create an AudioRenderer instance, and set the events to listen for. + init() { + audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // Create an AudioRenderer instance. + if (!err) { + console.info(`${TAG}: creating AudioRenderer success`); + this.renderModel = renderer; + this.renderModel.on('stateChange', (state) => { // Set the events to listen for. A callback is invoked when the AudioRenderer is switched to the specified state. 
+ if (state == 1) { + console.info('audio renderer state is: STATE_PREPARED'); + } + if (state == 2) { + console.info('audio renderer state is: STATE_RUNNING'); + } + }); + this.renderModel.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of rendered frames reaches 1000. + if (position == 1000) { + console.info('ON Triggered successfully'); + } + }); + } else { + console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`); + } + }); + } + + // Start audio rendering. + async start() { + let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED]; + if (stateGroup.indexOf(this.renderModel.state) === -1) { // Rendering can be started only when the AudioRenderer is in the prepared, paused, or stopped state. + console.error(TAG + 'start failed'); + return; + } + await this.renderModel.start(); // Start rendering. + + const bufferSize = await this.renderModel.getBufferSize(); + let context = getContext(this); + let path = context.filesDir; + const filePath = path + '/test.wav'; // Use the sandbox path to obtain the file. The actual file path is /data/storage/el2/base/haps/entry/files/test.wav. + + let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); + let stat = await fs.stat(filePath); + let buf = new ArrayBuffer(bufferSize); + let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1); + for (let i = 0; i < len; i++) { + let options = { + offset: i * bufferSize, + length: bufferSize + }; + let readsize = await fs.read(file.fd, buf, options); + + // buf indicates the audio data to be written to the buffer. Before calling AudioRenderer.write(), you can preprocess the audio data for personalized playback. The AudioRenderer reads the audio data written to the buffer for rendering. 
+ + let writeSize = await new Promise((resolve, reject) => { + this.renderModel.write(buf, (err, writeSize) => { + if (err) { + reject(err); + } else { + resolve(writeSize); + } + }); + }); + if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // The rendering stops if the AudioRenderer is in the released state. + fs.close(file); + await this.renderModel.stop(); + } + if (this.renderModel.state === audio.AudioState.STATE_RUNNING) { + if (i === len - 1) { // The rendering stops if the file finishes reading. + fs.close(file); + await this.renderModel.stop(); + } + } + } + } + + // Pause the rendering. + async pause() { + // Rendering can be paused only when the AudioRenderer is in the running state. + if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) { + console.info('Renderer is not running'); + return; + } + await this.renderModel.pause(); // Pause rendering. + if (this.renderModel.state === audio.AudioState.STATE_PAUSED) { + console.info('Renderer is paused.'); + } else { + console.error('Pausing renderer failed.'); + } + } + + // Stop rendering. + async stop() { + // Rendering can be stopped only when the AudioRenderer is in the running or paused state. + if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) { + console.info('Renderer is not running or paused.'); + return; + } + await this.renderModel.stop(); // Stop rendering. + if (this.renderModel.state === audio.AudioState.STATE_STOPPED) { + console.info('Renderer stopped.'); + } else { + console.error('Stopping renderer failed.'); + } + } + + // Release the instance. + async release() { + // The AudioRenderer can be released only when it is not in the released state. + if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { + console.info('Renderer already released'); + return; + } + await this.renderModel.release(); // Release the instance. 
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { + console.info('Renderer released'); + } else { + console.error('Renderer release failed.'); + } + } +} +``` + +When audio streams with the same or higher priority need to use the output device, the current audio playback will be interrupted. The application can respond to and handle the interruption event. For details about how to process concurrent audio playback, see [Audio Playback Concurrency Policies](audio-playback-concurrency.md). diff --git a/en/application-dev/media/using-avplayer-for-playback.md b/en/application-dev/media/using-avplayer-for-playback.md new file mode 100644 index 0000000000000000000000000000000000000000..6cb6ab1e67ef0ae8a44e04fa915ad87bcc9ed024 --- /dev/null +++ b/en/application-dev/media/using-avplayer-for-playback.md @@ -0,0 +1,167 @@ +# Using AVPlayer for Audio Playback + +The AVPlayer is used to play raw media assets in an end-to-end manner. In this topic, you will learn how to use the AVPlayer to play a complete piece of music. + +If you want the application to continue playing the music in the background or when the screen is off, you must use the [AVSession](avsession-overview.md) and [continuous task](../task-management/continuous-task-dev-guide.md) to prevent the playback from being forcibly interrupted by the system. + + +The full playback process includes creating an **AVPlayer** instance, setting the media asset to play, setting playback parameters (volume, speed, and focus mode), controlling playback (play, pause, seek, and stop), resetting the playback configuration, and releasing the instance. + + +During application development, you can use the **state** attribute of the AVPlayer to obtain the AVPlayer state or call **on('stateChange')** to listen for state changes. If the application performs an operation when the AVPlayer is not in the given state, the system may throw an exception or generate other undefined behavior. 
+ + +**Figure 1** Playback state transition +![Playback state change](figures/playback-status-change.png) + +For details about the state, see [AVPlayerState](../reference/apis/js-apis-media.md#avplayerstate9). When the AVPlayer is in the **prepared**, **playing**, **paused**, or **completed** state, the playback engine is working and a large amount of RAM is occupied. If your application does not need to use the AVPlayer, call **reset()** or **release()** to release the instance. + +## How to Develop + +Read [AVPlayer](../reference/apis/js-apis-media.md#avplayer9) for the API reference. + +1. Call **createAVPlayer()** to create an **AVPlayer** instance. The AVPlayer is the **idle** state. + +2. Set the events to listen for, which will be used in the full-process scenario. The table below lists the supported events. + | Event Type| Description| + | -------- | -------- | + | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVPlayer.| + | error | Mandatory; used to listen for AVPlayer errors.| + | durationUpdate | Used to listen for progress bar updates to refresh the media asset duration.| + | timeUpdate | Used to listen for the current position of the progress bar to refresh the current time.| + | seekDone | Used to listen for the completion status of the **seek()** request.
This event is reported when the AVPlayer seeks to the playback position specified in **seek()**.| + | speedDone | Used to listen for the completion status of the **setSpeed()** request.
This event is reported when the AVPlayer plays music at the speed specified in **setSpeed()**.| + | volumeChange | Used to listen for the completion status of the **setVolume()** request.
This event is reported when the AVPlayer plays music at the volume specified in **setVolume()**.| + | bufferingUpdate | Used to listen for network playback buffer information. This event reports the buffer percentage and playback progress.| + | audioInterrupt | Used to listen for audio interruption. This event is used together with the **audioInterruptMode** attribute.
This event is reported when the current audio playback is interrupted by another (for example, when a call is coming), so the application can process the event in time.| + +3. Set the media asset URL. The AVPlayer enters the **initialized** state. + > **NOTE** + > + > The URL in the code snippet below is for reference only. You need to check the media asset validity and set the URL based on service requirements. + > + > - If local files are used for playback, ensure that the files are available and the application sandbox path is used for access. For details about how to obtain the application sandbox path, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). For details about the application sandbox and how to push files to the application sandbox, see [File Management](../file-management/app-sandbox-directory.md). + > + > - If a network playback path is used, you must request the ohos.permission.INTERNET [permission](../security/accesstoken-guidelines.md). + > + > - You can also use **ResourceManager.getRawFd** to obtain the file descriptor of a file packed in the HAP file. For details, see [ResourceManager API Reference](../reference/apis/js-apis-resource-manager.md#getrawfd9). + > + > - The [playback formats and protocols](avplayer-avrecorder-overview.md#supported-formats-and-protocols) in use must be those supported by the system. + +4. Call **prepare()** to switch the AVPlayer to the **prepared** state. In this state, you can obtain the duration of the media asset to play and set the volume. + +5. Call **play()**, **pause()**, **seek()**, and **stop()** to perform audio playback control as required. + +6. (Optional) Call **reset()** to reset the AVPlayer. The AVPlayer enters the **idle** state again and you can change the media asset URL. + +7. Call **release()** to switch the AVPlayer to the **released** state. Now your application exits the playback. 
+ +## Sample Code + +Refer to the sample code below to play a complete piece of music. + +```ts +import media from '@ohos.multimedia.media'; +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +export class AVPlayerDemo { + private avPlayer; + private count: number = 0; + + // Set AVPlayer callback functions. + setAVPlayerCallback() { + // Callback function for the seek operation. + this.avPlayer.on('seekDone', (seekDoneTime) => { + console.info(`AVPlayer seek succeeded, seek time is ${seekDoneTime}`); + }) + // Callback function for errors. If an error occurs during the operation on the AVPlayer, reset() is called to reset the AVPlayer. + this.avPlayer.on('error', (err) => { + console.error(`Invoke avPlayer failed, code is ${err.code}, message is ${err.message}`); + this.avPlayer.reset(); // Call reset() to reset the AVPlayer, which enters the idle state. + }) + // Callback function for state changes. + this.avPlayer.on('stateChange', async (state, reason) => { + switch (state) { + case 'idle': // This state is reported upon a successful callback of reset(). + console.info('AVPlayer state idle called.'); + this.avPlayer.release(); // Call release() to release the instance. + break; + case 'initialized': // This state is reported when the AVPlayer sets the playback source. + console.info('AVPlayerstate initialized called.'); + this.avPlayer.prepare().then(() => { + console.info('AVPlayer prepare succeeded.'); + }, (err) => { + console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`); + }); + break; + case 'prepared': // This state is reported upon a successful callback of prepare(). + console.info('AVPlayer state prepared called.'); + this.avPlayer.play(); // Call play() to start playback. + break; + case 'playing': // This state is reported upon a successful callback of play(). 
+ console.info('AVPlayer state playing called.'); + if (this.count !== 0) { + console.info('AVPlayer start to seek.'); + this.avPlayer.seek (this.avPlayer.duration); // Call seek() to seek to the end of the audio clip. + } else { + this.avPlayer.pause(); // Call pause() to pause the playback. + } + this.count++; + break; + case 'paused': // This state is reported upon a successful callback of pause(). + console.info('AVPlayer state paused called.'); + this.avPlayer.play(); // Call play() again to start playback. + break; + case 'completed': // This state is reported upon the completion of the playback. + console.info('AVPlayer state completed called.'); + this.avPlayer.stop(); // Call stop() to stop the playback. + break; + case 'stopped': // This state is reported upon a successful callback of stop(). + console.info('AVPlayer state stopped called.'); + this.avPlayer.reset(); // Call reset() to reset the AVPlayer state. + break; + case 'released': + console.info('AVPlayer state released called.'); + break; + default: + console.info('AVPlayer state unknown called.'); + break; + } + }) + } + + // The following demo shows how to use the file system to open the sandbox address, obtain the media file address, and play the media file using the URL attribute. + async avPlayerUrlDemo() { + // Create an AVPlayer instance. + this.avPlayer = await media.createAVPlayer(); + // Set a callback function for state changes. + this.setAVPlayerCallback(); + let fdPath = 'fd://'; + // Obtain the sandbox address filesDir through UIAbilityContext. The stage model is used as an example. + let context = getContext(this) as common.UIAbilityContext; + let pathDir = context.filesDir; + let path = pathDir + '/01.mp3'; + // Open the corresponding file address to obtain the file descriptor and assign a value to the URL to trigger the reporting of the initialized state. 
+ let file = await fs.open(path); + fdPath = fdPath + '' + file.fd; + this.avPlayer.url = fdPath; + } + + // The following demo shows how to use resourceManager to obtain the media file packed in the HAP file and play the media file by using the fdSrc attribute. + async avPlayerFdSrcDemo() { + // Create an AVPlayer instance. + this.avPlayer = await media.createAVPlayer(); + // Set a callback function for state changes. + this.setAVPlayerCallback(); + // Call getRawFd of the resourceManager member of UIAbilityContext to obtain the media asset URL. + // The return type is {fd,offset,length}, where fd indicates the file descriptor address of the HAP file, offset indicates the media asset offset, and length indicates the duration of the media asset to play. + let context = getContext(this) as common.UIAbilityContext; + let fileDescriptor = await context.resourceManager.getRawFd('01.mp3'); + // Assign a value to fdSrc to trigger the reporting of the initialized state. + this.avPlayer.fdSrc = fileDescriptor; + } +} +``` + + \ No newline at end of file diff --git a/en/application-dev/media/using-avrecorder-for-recording.md b/en/application-dev/media/using-avrecorder-for-recording.md new file mode 100644 index 0000000000000000000000000000000000000000..71ab8557df470671088adfaa0473a6448d935881 --- /dev/null +++ b/en/application-dev/media/using-avrecorder-for-recording.md @@ -0,0 +1,182 @@ +# Using AVRecorder for Audio Recording + +You will learn how to use the AVRecorder to develop audio recording functionalities including starting, pausing, resuming, and stopping recording. + +During application development, you can use the **state** attribute of the AVRecorder to obtain the AVRecorder state or call **on('stateChange')** to listen for state changes. Your code must meet the state machine requirements. For example, **pause()** is called only when the AVRecorder is in the **started** state, and **resume()** is called only when it is in the **paused** state. 
+ +**Figure 1** Recording state transition + +![Recording state change](figures/recording-status-change.png) + +For details about the state, see [AVRecorderState](../reference/apis/js-apis-media.md#avrecorderstate9). + + +## How to Develop + +Read [AVRecorder](../reference/apis/js-apis-media.md#avrecorder9) for the API reference. + +1. Create an **AVRecorder** instance. The AVRecorder is in the **idle** state. + + ```ts + import media from '@ohos.multimedia.media'; + + let avRecorder = undefined; + media.createAVRecorder().then((recorder) => { + avRecorder = recorder; + }, (err) => { + console.error(`Invoke createAVRecorder failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +2. Set the events to listen for. + | Event Type| Description| + | -------- | -------- | + | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVRecorder.| + | error | Mandatory; used to listen for AVRecorder errors.| + + + ```ts + // Callback function for state changes. + avRecorder.on('stateChange', (state, reason) => { + console.log(`current state is ${state}`); + // You can add the action to be performed after the state is switched. + }) + + // Callback function for errors. + avRecorder.on('error', (err) => { + console.error(`avRecorder failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +3. Set audio recording parameters and call **prepare()**. The AVRecorder enters the **prepared** state. + > **NOTE** + > + > Pay attention to the following when configuring parameters: + > + > - In pure audio recording scenarios, set only audio-related parameters in **avConfig** of **prepare()**. + > If video-related parameters are configured, an error will be reported in subsequent steps. If video recording is required, follow the instructions provided in [Video Recording Development](video-recording.md). + > + > - The [recording formats](avplayer-avrecorder-overview.md#supported-formats) in use must be those supported by the system. 
+ > + > - The recording output URL (URL in **avConfig** in the sample code) must be in the format of fd://xx (where xx indicates a file descriptor). You must call [ohos.file.fs](../reference/apis/js-apis-file-fs.md) to implement access to the application file. For details, see [Application File Access and Management](../file-management/app-file-access.md). + + + ```ts + let avProfile = { + audioBitrate: 100000, // Audio bit rate. + audioChannels: 2, // Number of audio channels. + audioCodec: media.CodecMimeType.AUDIO_AAC, // Audio encoding format. Currently, only AAC is supported. + audioSampleRate: 48000, // Audio sampling rate. + fileFormat: media.ContainerFormatType.CFT_MPEG_4A, // Encapsulation format. Currently, only M4A is supported. + } + let avConfig = { + audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // Audio input source. In this example, the microphone is used. + profile: avProfile, + url: 'fd://35', // Obtain the file descriptor of the created audio file by referring to the sample code in Application File Access and Management. + } + avRecorder.prepare(avConfig).then(() => { + console.log('Invoke prepare succeeded.'); + }, (err) => { + console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`); + }) + ``` + +4. Call **start()** to start recording. The AVRecorder enters the **started** state. + +5. Call **pause()** to pause recording. The AVRecorder enters the **paused** state. + +6. Call **resume()** to resume recording. The AVRecorder enters the **started** state again. + +7. Call **stop()** to stop recording. The AVRecorder enters the **stopped** state. + +8. Call **reset()** to reset the resources. The AVRecorder enters the **idle** state. In this case, you can reconfigure the recording parameters. + +9. Call **release()** to switch the AVRecorder to the **released** state. Now your application exits the recording. 
+ + +## Sample Code + + Refer to the sample code below to complete the process of starting, pausing, resuming, and stopping recording. + +```ts +import media from '@ohos.multimedia.media'; + +export class AudioRecorderDemo { + private avRecorder; + private avProfile = { + audioBitrate: 100000, // Audio bit rate. + audioChannels: 2, // Number of audio channels. + audioCodec: media.CodecMimeType.AUDIO_AAC, // Audio encoding format. Currently, only AAC is supported. + audioSampleRate: 48000, // Audio sampling rate. + fileFormat: media.ContainerFormatType.CFT_MPEG_4A, // Encapsulation format. Currently, only M4A is supported. + }; + private avConfig = { + audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // Audio input source. In this example, the microphone is used. + profile: this.avProfile, + url: 'fd://35', // Create, read, and write a file by referring to the sample code in Application File Access and Management. + }; + + // Set AVRecorder callback functions. + setAudioRecorderCallback() { + // Callback function for state changes. + this.avRecorder.on('stateChange', (state, reason) => { + console.log(`AudioRecorder current state is ${state}`); + }) + // Callback function for errors. + this.avRecorder.on('error', (err) => { + console.error(`AudioRecorder failed, code is ${err.code}, message is ${err.message}`); + }) + } + + // Process of starting recording. + async startRecordingProcess() { + // 1. Create an AVRecorder instance. + this.avRecorder = await media.createAVRecorder(); + this.setAudioRecorderCallback(); + // 2. Obtain the file descriptor of the recording file and assign it to the URL in avConfig. For details, see FilePicker. + // 3. Set recording parameters to complete the preparations. + await this.avRecorder.prepare(this.avConfig); + // 4. Start recording. + await this.avRecorder.start(); + } + + // Process of pausing recording. 
+ async pauseRecordingProcess() { + if (this.avRecorder.state ==='started') { // pause() can be called only when the AVRecorder is in the started state . + await this.avRecorder.pause(); + } + } + + // Process of resuming recording. + async resumeRecordingProcess() { + if (this.avRecorder.state === 'paused') { // resume() can be called only when the AVRecorder is in the paused state . + await this.avRecorder.resume(); + } + } + + // Process of stopping recording. + async stopRecordingProcess() { + // 1. Stop recording. + if (this.avRecorder.state === 'started' + || this.avRecorder.state ==='paused') { // stop() can be called only when the AVRecorder is in the started or paused state. + await this.avRecorder.stop(); + } + // 2. Reset the AVRecorder. + await this.avRecorder.reset(); + // 3. Release the AVRecorder instance. + await this.avRecorder.release(); + // 4. Close the file descriptor of the recording file. + } + + // Complete sample code for starting, pausing, resuming, and stopping recording. + async audioRecorderDemo() { + await this.startRecordingProcess(); // Start recording. + // You can set the recording duration. For example, you can set the sleep mode to prevent code execution. + await this.pauseRecordingProcess(); // Pause recording. + await this.resumeRecordingProcess(); // Resume recording. + await this.stopRecordingProcess(); // Stop recording. + } +} +``` + + \ No newline at end of file diff --git a/en/application-dev/media/using-avsession-controller.md b/en/application-dev/media/using-avsession-controller.md new file mode 100644 index 0000000000000000000000000000000000000000..5e4b69d8b48f5acad64f120892062e66d67c6b12 --- /dev/null +++ b/en/application-dev/media/using-avsession-controller.md @@ -0,0 +1,244 @@ +# AVSession Controller + +Media Controller preset in OpenHarmony functions as the controller to interact with audio and video applications, for example, obtaining and displaying media information and delivering control commands. 
+ +You can develop a system application (for example, a new playback control center or voice assistant) as the controller to interact with audio and video applications in the system. + +## Basic Concepts + +- AVSessionDescriptor: session information, including the session ID, session type (audio/video), custom session name (**sessionTag**), information about the corresponding application (**elementName**), and whether the session is pinned on top (**isTopSession**). + +- Top session: session with the highest priority in the system, for example, a session that is being played. Generally, the controller must hold an **AVSessionController** object to communicate with a session. However, the controller can directly communicate with the top session, for example, directly sending a control command or key event, without holding an **AVSessionController** object. + +## Available APIs + +The table below lists the key APIs used by the controller. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise. + +For details, see [AVSession Management](../reference/apis/js-apis-avsession.md). + +| API| Description| +| -------- | -------- | +| getAllSessionDescriptors(callback: AsyncCallback&lt;Array&lt;Readonly&lt;AVSessionDescriptor&gt;&gt;&gt;): void | Obtains the descriptors of all AVSessions in the system.| +| createController(sessionId: string, callback: AsyncCallback&lt;AVSessionController&gt;): void | Creates an AVSessionController.| +| getValidCommands(callback: AsyncCallback&lt;Array&lt;AVControlCommandType&gt;&gt;): void | Obtains valid commands supported by the AVSession.
Control commands listened for by an audio and video application when it accesses the AVSession are considered as valid commands supported by the AVSession. For details, see [Provider of AVSession](using-avsession-developer.md).| +| getLaunchAbility(callback: AsyncCallback&lt;WantAgent&gt;): void | Obtains the UIAbility that is configured in the AVSession and can be started.
The UIAbility configured here is started when a user operates the UI of the controller, for example, clicking a widget in Media Controller.| +| sendAVKeyEvent(event: KeyEvent, callback: AsyncCallback<void>): void | Sends a key event to an AVSession through the AVSessionController object.| +| sendSystemAVKeyEvent(event: KeyEvent, callback: AsyncCallback<void>): void | Sends a key event to the top session.| +| sendControlCommand(command: AVControlCommand, callback: AsyncCallback<void>): void | Sends a control command to an AVSession through the AVSessionController object.| +| sendSystemControlCommand(command: AVControlCommand, callback: AsyncCallback<void>): void | Sends a control command to the top session.| + +## How to Develop + +To enable a system application to access the AVSession service as a controller, proceed as follows: + +1. Obtain **AVSessionDescriptor** through AVSessionManager and create an **AVSessionController** object. + The controller may obtain all **AVSessionDescriptor**s in the current system, and create an **AVSessionController** object for each session, so as to perform unified playback control on all the audio and video applications. + + ```ts + // Import the AVSessionManager module. + import AVSessionManager from '@ohos.multimedia.avsession'; + + // Define global variables. + let g_controller = new Array(); + let g_centerSupportCmd:Set = new Set(['play', 'pause', 'playNext', 'playPrevious', 'fastForward', 'rewind', 'seek','setSpeed', 'setLoopMode', 'toggleFavorite']); + let g_validCmd:Set; + // Obtain the session descriptors and create an AVSessionController object. 
+ AVSessionManager.getAllSessionDescriptors().then((descriptors) => { + descriptors.forEach((descriptor) => { + AVSessionManager.createController(descriptor.sessionId).then((controller) => { + g_controller.push(controller); + }).catch((err) => { + console.error(`createController : ERROR : ${err.message}`); + }); + }); + }).catch((err) => { + console.error(`getAllSessionDescriptors : ERROR : ${err.message}`); + }); + + ``` + +2. Listen for the session state and service state events. + + The following session state events are available: + + - **sessionCreate**: triggered when a session is created. + - **sessionDestroy**: triggered when a session is destroyed. + - **topSessionChange**: triggered when the top session is changed. + + The service state event **sessionServiceDie** is reported when the AVSession service is abnormal. + + ```ts + // Subscribe to the 'sessionCreate' event and create an AVSessionController object. + AVSessionManager.on('sessionCreate', (session) => { + // After an AVSession is added, you must create an AVSessionController object. + AVSessionManager.createController(session.sessionId).then((controller) => { + g_controller.push(controller); + }).catch((err) => { + console.info(`createController : ERROR : ${err.message}`); + }); + }); + + // Subscribe to the 'sessionDestroy' event to enable the application to get notified when the session dies. + AVSessionManager.on('sessionDestroy', (session) => { + let index = g_controller.findIndex((controller) => { + return controller.sessionId === session.sessionId; + }); + if (index !== 0) { + g_controller[index].destroy(); + g_controller.splice(index, 1); + } + }); + // Subscribe to the 'topSessionChange' event. + AVSessionManager.on('topSessionChange', (session) => { + let index = g_controller.findIndex((controller) => { + return controller.sessionId === session.sessionId; + }); + // Place the session on the top. 
+ if (index !== 0) { + g_controller.sort((a, b) => { + return a.sessionId === session.sessionId ? -1 : 0; + }); + } + }); + // Subscribe to the 'sessionServiceDie' event. + AVSessionManager.on('sessionServiceDie', () => { + // The server is abnormal, and the application clears resources. + console.info("Server exception."); + }) + ``` + +3. Subscribe to media information changes and other session events. + + The following media information change events are available: + + - **metadataChange**: triggered when the session metadata changes. + - **playbackStateChange**: triggered when the playback state changes. + - **activeStateChange**: triggered when the activation state of the session changes. + - **validCommandChange**: triggered when the valid commands supported by the session changes. + - **outputDeviceChange**: triggered when the output device changes. + - **sessionDestroy**: triggered when a session is destroyed. + + The controller can listen for events as required. + + ```ts + // Subscribe to the 'activeStateChange' event. + controller.on('activeStateChange', (isActive) => { + if (isActive) { + console.info("The widget corresponding to the controller is highlighted."); + } else { + console.info("The widget corresponding to the controller is invalid."); + } + }); + // Subscribe to the 'sessionDestroy' event to enable the controller to get notified when the session dies. + controller.on('sessionDestroy', () => { + console.info('on sessionDestroy : SUCCESS '); + controller.destroy().then(() => { + console.info('destroy : SUCCESS '); + }).catch((err) => { + console.info(`destroy : ERROR :${err.message}`); + }); + }); + + // Subscribe to metadata changes. + let metaFilter = ['assetId', 'title', 'description']; + controller.on('metadataChange', metaFilter, (metadata) => { + console.info(`on metadataChange assetId : ${metadata.assetId}`); + }); + // Subscribe to playback state changes. 
+ let playbackFilter = ['state', 'speed', 'loopMode']; + controller.on('playbackStateChange', playbackFilter, (playbackState) => { + console.info(`on playbackStateChange state : ${playbackState.state}`); + }); + // Subscribe to supported command changes. + controller.on('validCommandChange', (cmds) => { + console.info(`validCommandChange : SUCCESS : size : ${cmds.size}`); + console.info(`validCommandChange : SUCCESS : cmds : ${cmds.values()}`); + g_validCmd.clear(); + for (let c of g_centerSupportCmd) { + if (cmds.has(c)) { + g_validCmd.add(c); + } + } + }); + // Subscribe to output device changes. + controller.on('outputDeviceChange', (device) => { + console.info(`on outputDeviceChange device isRemote : ${device.isRemote}`); + }); + ``` + +4. Obtain the media information transferred by the provider for display on the UI, for example, displaying the track being played and the playback state in Media Controller. + + ```ts + async getInfoFromSessionByController() { + // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above. + let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER; + // Obtain the session ID. + let sessionId: string = controller.sessionId; + console.info(`get sessionId by controller : isActive : ${sessionId}`); + // Obtain the activation state of the session. + let isActive: boolean = await controller.isActive(); + console.info(`get activeState by controller : ${isActive}`); + // Obtain the media information of the session. + let metadata: AVSessionManager.AVMetadata = await controller.getAVMetadata(); + console.info(`get media title by controller : ${metadata.title}`); + console.info(`get media artist by controller : ${metadata.artist}`); + // Obtain the playback information of the session. 
+ let avPlaybackState: AVSessionManager.AVPlaybackState = await controller.getAVPlaybackState(); + console.info(`get playbackState by controller : ${avPlaybackState.state}`); + console.info(`get favoriteState by controller : ${avPlaybackState.isFavorite}`); + } + ``` + +5. Control the playback behavior, for example, sending a command to operate (play/pause/previous/next) the item being played in Media Controller. + + After listening for the control command event, the audio and video application serving as the provider needs to implement the corresponding operation. + + + ```ts + async sendCommandToSessionByController() { + // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above. + let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER; + // Obtain the commands supported by the session. + let validCommandTypeArray: Array = await controller.getValidCommands(); + console.info(`get validCommandArray by controller : length : ${validCommandTypeArray.length}`); + // Deliver the 'play' command. + // If the 'play' command is valid, deliver it. Normal sessions should provide and implement the playback. + if (validCommandTypeArray.indexOf('play') >= 0) { + let avCommand: AVSessionManager.AVControlCommand = {command:'play'}; + controller.sendControlCommand(avCommand); + } + // Deliver the 'pause' command. + if (validCommandTypeArray.indexOf('pause') >= 0) { + let avCommand: AVSessionManager.AVControlCommand = {command:'pause'}; + controller.sendControlCommand(avCommand); + } + // Deliver the 'playPrevious' command. + if (validCommandTypeArray.indexOf('playPrevious') >= 0) { + let avCommand: AVSessionManager.AVControlCommand = {command:'playPrevious'}; + controller.sendControlCommand(avCommand); + } + // Deliver the 'playNext' command. 
+ if (validCommandTypeArray.indexOf('playNext') >= 0) { + let avCommand: AVSessionManager.AVControlCommand = {command:'playNext'}; + controller.sendControlCommand(avCommand); + } + } + ``` + +6. When the audio and video application exits, cancel the listener and release the resources. + + ```ts + async destroyController() { + // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above. + let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER; + + // Destroy the AVSessionController object. After being destroyed, it is no longer available. + controller.destroy(function (err) { + if (err) { + console.info(`Destroy controller ERROR : code: ${err.code}, message: ${err.message}`); + } else { + console.info('Destroy controller SUCCESS'); + } + }); + } + ``` diff --git a/en/application-dev/media/using-avsession-developer.md b/en/application-dev/media/using-avsession-developer.md new file mode 100644 index 0000000000000000000000000000000000000000..07bd4bf1297f3afc5352d30e9acd674fe056f815 --- /dev/null +++ b/en/application-dev/media/using-avsession-developer.md @@ -0,0 +1,198 @@ +# AVSession Provider + +An audio and video application needs to access the AVSession service as a provider in order to display media information in the controller (for example, Media Controller) and respond to control commands delivered by the controller. + +## Basic Concepts + +- AVMetadata: media data related attributes, including the IDs of the current media asset (assetId), previous media asset (previousAssetId), and next media asset (nextAssetId), title, author, album, writer, and duration. + +- AVPlaybackState: playback state attributes, including the playback state, position, speed, buffered time, loop mode, and whether the media asset is favorited (**isFavorite**). 
+ +## Available APIs + +The table below lists the key APIs used by the provider. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise. + +For details, see [AVSession Management](../reference/apis/js-apis-avsession.md). + +| API| Description| +| -------- | -------- | +| createAVSession(context: Context, tag: string, type: AVSessionType, callback: AsyncCallback<AVSession>): void | Creates an AVSession.
Only one AVSession can be created for a UIAbility.| +| setAVMetadata(data: AVMetadata, callback: AsyncCallback<void>): void | Sets AVSession metadata.| +| setAVPlaybackState(state: AVPlaybackState, callback: AsyncCallback<void>): void | Sets the AVSession playback state.| +| setLaunchAbility(ability: WantAgent, callback: AsyncCallback<void>): void | Starts a UIAbility.| +| getController(callback: AsyncCallback<AVSessionController>): void | Obtains the controller of the AVSession.| +| activate(callback: AsyncCallback<void>): void | Activates the AVSession.| +| destroy(callback: AsyncCallback<void>): void | Destroys the AVSession.| + +## How to Develop + +To enable an audio and video application to access the AVSession service as a provider, proceed as follows: + +1. Call an API in the **AVSessionManager** class to create and activate an **AVSession** object. + + ```ts + import AVSessionManager from '@ohos.multimedia.avsession'; // Import the AVSessionManager module. + + // Create an AVSession object. + async createSession() { + let session: AVSessionManager.AVSession = await AVSessionManager.createAVSession(this.context, 'SESSION_NAME', 'audio'); + session.activate(); + console.info(`session create done : sessionId : ${session.sessionId}`); + } + ``` + +2. Set AVSession information, which includes: + - AVMetadata + - AVPlaybackState + + The controller will call an API in the **AVSessionController** class to obtain the information and display or process the information. + + ```ts + async setSessionInfo() { + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + // The player logic that triggers changes in the session metadata and playback state is omitted here. + // Set necessary session metadata. 
+ let metadata: AVSessionManager.AVMetadata = { + assetId: "0", + title: "TITLE", + artist: "ARTIST" + }; + session.setAVMetadata(metadata).then(() => { + console.info('SetAVMetadata successfully'); + }).catch((err) => { + console.info(`SetAVMetadata BusinessError: code: ${err.code}, message: ${err.message}`); + }); + // Set the playback state to paused and set isFavorite to false. + let playbackState: AVSessionManager.AVPlaybackState = { + state:AVSessionManager.PlaybackState.PLAYBACK_STATE_PAUSE, + isFavorite:false + }; + session.setAVPlaybackState(playbackState, function (err) { + if (err) { + console.info(`SetAVPlaybackState BusinessError: code: ${err.code}, message: ${err.message}`); + } else { + console.info('SetAVPlaybackState successfully'); + } + }); + } + ``` + +3. Set the UIAbility to be started by the controller. The UIAbility configured here is started when a user operates the UI of the controller, for example, clicking a widget in Media Controller. + The UIAbility is set through the **WantAgent** API. For details, see [WantAgent](../reference/apis/js-apis-app-ability-wantAgent.md). + + ```ts + import WantAgent from "@ohos.app.ability.wantAgent"; + ``` + + ```ts + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + let wantAgentInfo: { + wants: [ + { + bundleName: "com.example.musicdemo", + abilityName: "com.example.musicdemo.MainAbility" + } + ], + operationType: WantAgent.OperationType.START_ABILITIES, + requestCode: 0, + wantAgentFlags: [WantAgent.WantAgentFlags.UPDATE_PRESENT_FLAG] + } + WantAgent.getWantAgent(wantAgentInfo).then((agent) => { + session.setLaunchAbility(agent) + }) + ``` + +4. Listen for control commands delivered by the controller, for example, Media Controller. 
+ > **NOTE** + > + > After the provider registers a listener for the control command event, the event will be reflected in **getValidCommands()** of the controller. In other words, the controller determines that the command is valid and triggers the corresponding event as required. To ensure that the control commands delivered by the controller can be executed normally, the provider should not use a null implementation for listening. + + ```ts + async setListenerForMesFromController() { + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + // Generally, logic processing on the player is implemented in the listener. + // After the processing is complete, use the setter to synchronize the playback information. For details, see the code snippet above. + session.on('play', () => { + console.info('on play , do play task'); + + // do some tasks ··· + }); + session.on('pause', () => { + console.info('on pause , do pause task'); + // do some tasks ··· + }); + session.on('stop', () => { + console.info('on stop , do stop task'); + // do some tasks ··· + }); + session.on('playNext', () => { + console.info('on playNext , do playNext task'); + // do some tasks ··· + }); + session.on('playPrevious', () => { + console.info('on playPrevious , do playPrevious task'); + // do some tasks ··· + }); + } + ``` + +5. Obtain an **AVSessionController** object for this **AVSession** object for interaction. + + ```ts + async createControllerFromSession() { + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + + // Obtain an AVSessionController object for this AVSession object. 
+ let controller: AVSessionManager.AVSessionController = await session.getController(); + + // The AVSessionController object can interact with the AVSession object, for example, by delivering a control command. + let avCommand: AVSessionManager.AVControlCommand = {command:'play'}; + controller.sendControlCommand(avCommand); + + // Alternatively, listen for state changes. + controller.on('playbackStateChange', 'all', (state: AVSessionManager.AVPlaybackState) => { + + // do some things + }); + + // The AVSessionController object can perform many operations. For details, see the description of the controller. + } + ``` + +6. When the audio and video application exits and does not need to continue playback, cancel the listener and destroy the **AVSession** object. + The code snippet below is used for canceling the listener for control commands: + + ```ts + async unregisterSessionListener() { + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + + // Cancel the listener of the AVSession object. + session.off('play'); + session.off('pause'); + session.off('stop'); + session.off('playNext'); + session.off('playPrevious'); + } + ``` + + The code snippet below is used for destroying the AVSession object: + + ```ts + async destroySession() { + // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the node snippet above. + let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION; + // Destroy the AVSession object. 
+ session.destroy(function (err) { + if (err) { + console.info(`Destroy BusinessError: code: ${err.code}, message: ${err.message}`); + } else { + console.info('Destroy : SUCCESS '); + } + }); + } + ``` diff --git a/en/application-dev/media/using-distributed-avsession.md b/en/application-dev/media/using-distributed-avsession.md new file mode 100644 index 0000000000000000000000000000000000000000..c1835d661fdd2b57b7dce0f2507dbea748eaea7e --- /dev/null +++ b/en/application-dev/media/using-distributed-avsession.md @@ -0,0 +1,55 @@ +# Using Distributed AVSession + +## Basic Concepts + +- Remote AVSession: an AVSession automatically created on the remote device by the AVSession service for synchronization with an AVSession on the local device. + +- Remote AVSessionController: AVSessionController automatically created on the remote device after projection. + +## Available APIs + +The table below describes the key APIs used for remote projection with the distributed AVSession. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise. + +For details, see [AVSession Management](../reference/apis/js-apis-avsession.md). + +| API| Description| +| -------- | -------- | +| castAudio(session: SessionToken \| 'all', audioDevices: Array<audio.AudioDeviceDescriptor>, callback: AsyncCallback<void>): void | Casts a session to a list of devices.| + +## How to Develop + +To enable a system application that accesses the AVSession service as the controller to use the distributed AVSession for projection, proceed as follows: + +1. Import the modules. Before projection, you must obtain the AudioDeviceDescriptor from the audio module. Therefore, import the audio module and AVSessionManager module. + + ```ts + import AVSessionManager from '@ohos.multimedia.avsession'; + import audio from '@ohos.multimedia.audio'; + ``` + +2. 
Use **castAudio** in the **AVSessionManager** class to project all sessions of the local device to another device.
+
+   ```ts
+   // Cast the sessions to another device.
+   let audioManager = audio.getAudioManager();
+   let audioRoutingManager = audioManager.getRoutingManager();
+   let audioDevices;
+   await audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => {
+     audioDevices = data;
+     console.info('Promise returned to indicate that the device list is obtained.');
+   }).catch((err) => {
+     console.info(`getDevices : ERROR : ${err.message}`);
+   });
+
+   AVSessionManager.castAudio('all', audioDevices).then(() => {
+     console.info('castAudio : SUCCESS');
+   }).catch((err) => {
+     console.info(`castAudio : ERROR : ${err.message}`);
+   });
+   ```
+
+   After the system application on the local device initiates projection to a remote device, the AVSession framework instructs the AVSession service of the remote device to create a remote AVSession. When the AVSession on the local device changes (for example, the media information or playback state changes), the AVSession framework automatically synchronizes the change to the remote device.
+
+   The AVSession processing mechanism on the remote device is consistent with that on the local device. That is, the controller (for example, the Media Controller) on the remote device listens for the AVSession creation event, and creates a remote **AVSessionController** object to manage the remote AVSession. In addition, the control commands are automatically synchronized by the AVSession framework to the local device.
+
+   The provider (for example, an audio and video application) on the local device listens for control command events, so as to respond to the commands from the remote device in time.
diff --git a/en/application-dev/media/using-opensl-es-for-playback.md b/en/application-dev/media/using-opensl-es-for-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5dedbba659154a1893a471e5e9a3d33d33be20a
--- /dev/null
+++ b/en/application-dev/media/using-opensl-es-for-playback.md
@@ -0,0 +1,131 @@
+# Using OpenSL ES for Audio Playback
+
+OpenSL ES, short for Open Sound Library for Embedded Systems, is an embedded, cross-platform audio processing library that is free of charge. It provides high-performance and low-latency APIs for you to develop applications running on embedded mobile multimedia devices. OpenHarmony has implemented certain native APIs based on [OpenSL ES](https://www.khronos.org/opensles/) 1.0.1 API specifications developed by the [Khronos Group](https://www.khronos.org/). You can use these APIs through `<OpenSLES.h>` and `<OpenSLES_OpenHarmony.h>`.
+
+## OpenSL ES on OpenHarmony
+
+Currently, OpenHarmony implements parts of [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) to implement basic audio playback functionalities.
+
+If an API that has not been implemented on OpenHarmony is called, **SL_RESULT_FEATURE_UNSUPPORTED** is returned.
+
+The following lists the OpenSL ES APIs that have been implemented on OpenHarmony. For details, see the [OpenSL ES](https://www.khronos.org/opensles/) specifications.
+ +- **Engine APIs implemented on OpenHarmony** + - SLresult (\*CreateAudioPlayer) (SLEngineItf self, SLObjectItf \* pPlayer, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + - SLresult (\*CreateAudioRecorder) (SLEngineItf self, SLObjectItf \* pRecorder, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + - SLresult (\*CreateOutputMix) (SLEngineItf self, SLObjectItf \* pMix, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + +- **Object APIs implemented on OpenHarmony** + - SLresult (\*Realize) (SLObjectItf self, SLboolean async) + - SLresult (\*GetState) (SLObjectItf self, SLuint32 \* pState) + - SLresult (\*GetInterface) (SLObjectItf self, const SLInterfaceID iid, void \* pInterface) + - void (\*Destroy) (SLObjectItf self) + +- **Playback APIs implemented on OpenHarmony** + - SLresult (\*SetPlayState) (SLPlayItf self, SLuint32 state) + - SLresult (\*GetPlayState) (SLPlayItf self, SLuint32 \*pState) + +- **Volume control APIs implemented on OpenHarmony** + - SLresult (\*SetVolumeLevel) (SLVolumeItf self, SLmillibel level) + - SLresult (\*GetVolumeLevel) (SLVolumeItf self, SLmillibel \*pLevel) + - SLresult (\*GetMaxVolumeLevel) (SLVolumeItf self, SLmillibel \*pMaxLevel) + +- **BufferQueue APIs implemented on OpenHarmony** + + The APIs listed below can be used only after is introduced. + | API| Description| + | -------- | -------- | + | SLresult (\*Enqueue) (SLOHBufferQueueItf self, const void \*buffer, SLuint32 size) | Adds a buffer to the corresponding queue.
For an audio playback operation, this API adds the buffer with audio data to the **filledBufferQ_** queue. For an audio recording operation, this API adds the idle buffer after recording data storage to the **freeBufferQ_** queue.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **buffer** parameter indicates the pointer to the buffer with audio data or the pointer to the idle buffer after the recording data is stored.
The **size** parameter indicates the size of the buffer.| + | SLresult (\*Clear) (SLOHBufferQueueItf self) | Releases a **BufferQueue** object.
The **self** parameter indicates the **BufferQueue** object that calls this API.| + | SLresult (\*GetState) (SLOHBufferQueueItf self, SLOHBufferQueueState \*state) | Obtains the state of a **BufferQueue** object.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **state** parameter indicates the pointer to the state of the **BufferQueue** object.| + | SLresult (\*RegisterCallback) (SLOHBufferQueueItf self, SlOHBufferQueueCallback callback, void\* pContext) | Registers a callback.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **callback** parameter indicates the callback to be registered for the audio playback or recording operation.
The **pContext** parameter indicates the pointer to the audio file to be played for an audio playback operation or the pointer to the audio file to be recorded for an audio recording operation.| + | SLresult (\*GetBuffer) (SLOHBufferQueueItf self, SLuint8\*\* buffer, SLuint32\* size) | Obtains a buffer.
For an audio playback operation, this API obtains an idle buffer from the **freeBufferQ_** queue. For an audio recording operation, this API obtains the buffer that carries recording data from the **filledBufferQ_** queue.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **buffer** parameter indicates the double pointer to the idle buffer or the buffer carrying recording data.
The **size** parameter indicates the size of the buffer.| + +## Sample Code + +Refer to the sample code below to play an audio file. + +1. Add the header files. + + ```c++ + #include + #include + #include + ``` + +2. Use the **slCreateEngine** API to obtain an **engine** instance. + + ```c++ + SLObjectItf engineObject = nullptr; + slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr); + (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE); + ``` + +3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** API. + + ```c++ + SLEngineItf engineEngine = nullptr; + (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine); + ``` + +4. Configure the player and create an **AudioPlayer** instance. + + ```c++ + SLDataLocator_BufferQueue slBufferQueue = { + SL_DATALOCATOR_BUFFERQUEUE, + 0 + }; + + // Configure the parameters based on the audio file format. + SLDataFormat_PCM pcmFormat = { + SL_DATAFORMAT_PCM, + 2, // Number of channels. + SL_SAMPLINGRATE_48, // Sampling rate. + SL_PCMSAMPLEFORMAT_FIXED_16, // Audio sample format. + 0, + 0, + 0 + }; + SLDataSource slSource = {&slBufferQueue, &pcmFormat}; + SLObjectItf pcmPlayerObject = nullptr; + (*engineEngine)->CreateAudioPlayer(engineEngine, &pcmPlayerObject, &slSource, null, 0, nullptr, nullptr); + (*pcmPlayerObject)->Realize(pcmPlayerObject, SL_BOOLEAN_FALSE); + ``` + +5. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** API. + + ```c++ + SLOHBufferQueueItf bufferQueueItf; + (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf); + ``` + +6. Open an audio file and register the **BufferQueueCallback** function. + + ```c++ + static void BufferQueueCallback (SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size) + { + SLuint8 *buffer = nullptr; + SLuint32 pSize; + (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, &pSize); + // Write the audio data to be played to the buffer. 
+        (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size);
+    }
+    void *pContext; // This callback can be used to obtain the custom context information passed in.
+    (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, pContext);
+    ```
+
+7. Obtain the **playItf** instance of the **SL_IID_PLAY** API and start playing.
+
+    ```c++
+    SLPlayItf playItf = nullptr;
+    (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_PLAY, &playItf);
+    (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
+    ```
+
+8. Stop playing.
+
+    ```c++
+    (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED);
+    (*pcmPlayerObject)->Destroy(pcmPlayerObject);
+    (*engineObject)->Destroy(engineObject);
+    ```
diff --git a/en/application-dev/media/using-opensl-es-for-recording.md b/en/application-dev/media/using-opensl-es-for-recording.md
new file mode 100644
index 0000000000000000000000000000000000000000..55a18fc561c0117d5aff5aaedb22c36f1b7706bf
--- /dev/null
+++ b/en/application-dev/media/using-opensl-es-for-recording.md
@@ -0,0 +1,148 @@
+# Using OpenSL ES for Audio Recording
+
+OpenSL ES, short for Open Sound Library for Embedded Systems, is an embedded, cross-platform audio processing library that is free of charge. It provides high-performance and low-latency APIs for you to develop applications running on embedded mobile multimedia devices. OpenHarmony has implemented certain native APIs based on [OpenSL ES](https://www.khronos.org/opensles/) 1.0.1 API specifications developed by the [Khronos Group](https://www.khronos.org/). You can use these APIs through `<OpenSLES.h>` and `<OpenSLES_OpenHarmony.h>`.
+
+## OpenSL ES on OpenHarmony
+
+Currently, OpenHarmony implements parts of [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) to implement basic audio recording functionalities.
+
+If an API that has not been implemented on OpenHarmony is called, **SL_RESULT_FEATURE_UNSUPPORTED** is returned.
+ +The following lists the OpenSL ES APIs that have been implemented on OpenHarmony. For details, see the [OpenSL ES](https://www.khronos.org/opensles/) specifications. + +- **Engine APIs implemented on OpenHarmony** + - SLresult (\*CreateAudioPlayer) (SLEngineItf self, SLObjectItf \* pPlayer, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + - SLresult (\*CreateAudioRecorder) (SLEngineItf self, SLObjectItf \* pRecorder, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + - SLresult (\*CreateOutputMix) (SLEngineItf self, SLObjectItf \* pMix, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired) + +- **Object APIs implemented on OpenHarmony** + - SLresult (\*Realize) (SLObjectItf self, SLboolean async) + - SLresult (\*GetState) (SLObjectItf self, SLuint32 \* pState) + - SLresult (\*GetInterface) (SLObjectItf self, const SLInterfaceID iid, void \* pInterface) + - void (\*Destroy) (SLObjectItf self) + +- **Recorder APIs implemented on OpenHarmony** + - SLresult (\*SetRecordState) (SLRecordItf self, SLuint32 state) + - SLresult (\*GetRecordState) (SLRecordItf self,SLuint32 \*pState) + +- **BufferQueue APIs implemented on OpenHarmony** + + The APIs listed below can be used only after is introduced. + | API| Description| + | -------- | -------- | + | SLresult (\*Enqueue) (SLOHBufferQueueItf self, const void \*buffer, SLuint32 size) | Adds a buffer to the corresponding queue.
For an audio playback operation, this API adds the buffer with audio data to the **filledBufferQ_** queue. For an audio recording operation, this API adds the idle buffer after recording data storage to the **freeBufferQ_** queue.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **buffer** parameter indicates the pointer to the buffer with audio data or the pointer to the idle buffer after the recording data is stored.
The **size** parameter indicates the size of the buffer.| + | SLresult (\*Clear) (SLOHBufferQueueItf self) | Releases a **BufferQueue** object.
The **self** parameter indicates the **BufferQueue** object that calls this API.| + | SLresult (\*GetState) (SLOHBufferQueueItf self, SLOHBufferQueueState \*state) | Obtains the state of a **BufferQueue** object.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **state** parameter indicates the pointer to the state of the **BufferQueue** object.| + | SLresult (\*RegisterCallback) (SLOHBufferQueueItf self, SlOHBufferQueueCallback callback, void\* pContext) | Registers a callback.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **callback** parameter indicates the callback to be registered for the audio playback or recording operation.
The **pContext** parameter indicates the pointer to the audio file to be played for an audio playback operation or the pointer to the audio file to be recorded for an audio recording operation.| + | SLresult (\*GetBuffer) (SLOHBufferQueueItf self, SLuint8\*\* buffer, SLuint32\* size) | Obtains a buffer.
For an audio playback operation, this API obtains an idle buffer from the **freeBufferQ_** queue. For an audio recording operation, this API obtains the buffer that carries recording data from the **filledBufferQ_** queue.
The **self** parameter indicates the **BufferQueue** object that calls this API.
The **buffer** parameter indicates the double pointer to the idle buffer or the buffer carrying recording data.
The **size** parameter indicates the size of the buffer.| + +## Sample Code + +Refer to the sample code below to record an audio file. + +1. Add the header files. + + ```c++ + #include + #include + #include + ``` + +2. Use the **slCreateEngine** API to create and instantiate an **engine** object. + + ```c++ + SLObjectItf engineObject = nullptr; + slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr); + (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE); + ``` + +3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** API. + + ```c++ + SLEngineItf engineItf = nullptr; + (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineItf); + ``` + +4. Configure the recorder information (including the input source **audiosource** and output source **audiosink**), and create a **pcmCapturerObject** instance. + + ```c++ + SLDataLocator_IODevice io_device = { + SL_DATALOCATOR_IODEVICE, + SL_IODEVICE_AUDIOINPUT, + SL_DEFAULTDEVICEID_AUDIOINPUT, + NULL + }; + SLDataSource audioSource = { + &io_device, + NULL + }; + SLDataLocator_BufferQueue buffer_queue = { + SL_DATALOCATOR_BUFFERQUEUE, + 3 + }; + // Configure the parameters based on the audio file format. + SLDataFormat_PCM format_pcm = { + SL_DATAFORMAT_PCM, // Input audio format. + 1, // Mono channel. + SL_SAMPLINGRATE_44_1, // Sampling rate, 44100 Hz. + SL_PCMSAMPLEFORMAT_FIXED_16, // Audio sampling format, a signed 16-bit integer in little-endian format. + 0, + 0, + 0 + }; + SLDataSink audioSink = { + &buffer_queue, + &format_pcm + }; + + SLObjectItf pcmCapturerObject = nullptr; + (*engineItf)->CreateAudioRecorder(engineItf, &pcmCapturerObject, + &audioSource, &audioSink, 0, nullptr, nullptr); + (*pcmCapturerObject)->Realize(pcmCapturerObject, SL_BOOLEAN_FALSE); + + ``` + +5. Obtain the **recordItf** instance of the **SL_IID_RECORD** API. + + ```c++ + SLRecordItf recordItf; + (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_RECORD, &recordItf); + ``` + +6. 
Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** API. + + ```c++ + SLOHBufferQueueItf bufferQueueItf; + (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf); + ``` + +7. Register the **BufferQueueCallback** function. + + ```c++ + static void BufferQueueCallback(SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size) + { + // Obtain the user information passed in during the registration from pContext. + SLuint8 *buffer = nullptr; + SLuint32 pSize = 0; + (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, &pSize); + if (buffer != nullptr) { + // The recording data can be read from the buffer for subsequent processing. + (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size); + } + } + void *pContext; // This callback can be used to obtain the custom context information passed in. + (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, pContext); + ``` + +8. Start audio recording. + + ```c++ + (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_RECORDING); + ``` + +9. Stop audio recording. + + ```c++ + (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_STOPPED); + (*pcmCapturerObject)->Destroy(pcmCapturerObject); + ``` diff --git a/en/application-dev/media/using-toneplayer-for-playback.md b/en/application-dev/media/using-toneplayer-for-playback.md new file mode 100644 index 0000000000000000000000000000000000000000..11a528786b5bae712d8c4f07b9cad4ee29af2f48 --- /dev/null +++ b/en/application-dev/media/using-toneplayer-for-playback.md @@ -0,0 +1,140 @@ +# Using TonePlayer for Audio Playback (for System Applications Only) + +TonePlayer9+ provides APIs for playing and managing Dual Tone Multi Frequency (DTMF) tones, such as dial tones, ringback tones, supervisory tones, and proprietary tones. 
The main task of the TonePlayer is to generate sine waves of different frequencies by using the built-in algorithm based on the [ToneType](../reference/apis/js-apis-audio.md#tonetype9)s and add the sine waves to create a sound. The sound can then be played by using the [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8), and the playback task can also be managed by using the [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8). The full process includes loading the DTMF tone configuration, starting DTMF tone playing, stopping the playback, and releasing the resources associated with the **TonePlayer** object. For details about the APIs, see the [TonePlayer API Reference](../reference/apis/js-apis-audio.md#toneplayer9). + + +## Supported Tone Types + +The table below lists the supported [ToneType](../reference/apis/js-apis-audio.md#tonetype9)s. You can call **load()** with **audio.ToneType.*type*** as a parameter to load the tone resource of the specified type. 
+ +| Tone Type| Value| Description| +| -------- | -------- | -------- | +| TONE_TYPE_DIAL_0 | 0 | DTMF tone of key 0.| +| TONE_TYPE_DIAL_1 | 1 | DTMF tone of key 1.| +| TONE_TYPE_DIAL_2 | 2 | DTMF tone of key 2.| +| TONE_TYPE_DIAL_3 | 3 | DTMF tone of key 3.| +| TONE_TYPE_DIAL_4 | 4 | DTMF tone of key 4.| +| TONE_TYPE_DIAL_5 | 5 | DTMF tone of key 5.| +| TONE_TYPE_DIAL_6 | 6 | DTMF tone of key 6.| +| TONE_TYPE_DIAL_7 | 7 | DTMF tone of key 7.| +| TONE_TYPE_DIAL_8 | 8 | DTMF tone of key 8.| +| TONE_TYPE_DIAL_9 | 9 | DTMF tone of key 9.| +| TONE_TYPE_DIAL_S | 10 | DTMF tone of the star key (*).| +| TONE_TYPE_DIAL_P | 11 | DTMF tone of the pound key (#).| +| TONE_TYPE_DIAL_A | 12 | DTMF tone of key A.| +| TONE_TYPE_DIAL_B | 13 | DTMF tone of key B.| +| TONE_TYPE_DIAL_C | 14 | DTMF tone of key C.| +| TONE_TYPE_DIAL_D | 15 | DTMF tone of key D.| +| TONE_TYPE_COMMON_SUPERVISORY_DIAL | 100 | Supervisory tone - dial tone.| +| TONE_TYPE_COMMON_SUPERVISORY_BUSY | 101 | Supervisory tone - busy.| +| TONE_TYPE_COMMON_SUPERVISORY_CONGESTION | 102 | Supervisory tone - congestion.| +| TONE_TYPE_COMMON_SUPERVISORY_RADIO_ACK | 103 | Supervisory tone - radio path acknowledgment.| +| TONE_TYPE_COMMON_SUPERVISORY_RADIO_NOT_AVAILABLE | 104 | Supervisory tone - radio path not available.| +| TONE_TYPE_COMMON_SUPERVISORY_CALL_WAITING | 106 | Supervisory tone - call waiting tone.| +| TONE_TYPE_COMMON_SUPERVISORY_RINGTONE | 107 | Supervisory tone - ringing tone.| +| TONE_TYPE_COMMON_PROPRIETARY_BEEP | 200 | Proprietary tone - beep tone.| +| TONE_TYPE_COMMON_PROPRIETARY_ACK | 201 | Proprietary tone - ACK.| +| TONE_TYPE_COMMON_PROPRIETARY_PROMPT | 203 | Proprietary tone - PROMPT.| +| TONE_TYPE_COMMON_PROPRIETARY_DOUBLE_BEEP | 204 | Proprietary tone - double beep tone.| + + +## How to Develop + +To implement audio playback with the TonePlayer, perform the following steps: + +1. Create a **TonePlayer** instance. 
+ + ```ts + import audio from '@ohos.multimedia.audio'; + let audioRendererInfo = { + content : audio.ContentType.CONTENT_TYPE_SONIFICATION, + usage : audio.StreamUsage.STREAM_USAGE_MEDIA, + rendererFlags : 0 + }; + tonePlayerPromise = audio.createTonePlayer(audioRendererInfo); + ``` + +2. Load the DTMF tone configuration of the specified type. + + ```ts + tonePlayerPromise.load(audio.ToneType.TONE_TYPE_DIAL_0); + ``` + +3. Start DTMF tone playing. + + ```ts + tonePlayerPromise.start(); + ``` + +4. Stop the tone that is being played. + + ```ts + tonePlayerPromise.stop(); + ``` + +5. Release the resources associated with the **TonePlayer** instance. + + ```ts + tonePlayerPromise.release(); + ``` + +If the APIs are not called in the preceding sequence, the error code **6800301 NAPI_ERR_SYSTEM** is returned. + + +## Sample Code + +Refer to the following code to play the DTMF tone when the dial key on the keyboard is pressed. + +To prevent the UI thread from being blocked, most **TonePlayer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the promise functions. For more information, see [TonePlayer](../reference/apis/js-apis-audio.md#toneplayer9). + + +```ts +import audio from '@ohos.multimedia.audio'; + +export class TonelayerDemo { + private timer : number; + private timerPro : number; + // Promise mode. + async testTonePlayerPromise(type) { + console.info('testTonePlayerPromise start'); + if (this.timerPro) clearTimeout(this.timerPro); + let tonePlayerPromise; + let audioRendererInfo = { + content : audio.ContentType.CONTENT_TYPE_SONIFICATION, + usage : audio.StreamUsage.STREAM_USAGE_MEDIA, + rendererFlags : 0 + }; + this.timerPro = setTimeout(async () => { + try { + console.info('testTonePlayerPromise: createTonePlayer'); + // Create a DTMF player. 
+ tonePlayerPromise = await audio.createTonePlayer(audioRendererInfo); + console.info('testTonePlayerPromise: createTonePlayer-success'); + console.info(`testTonePlayerPromise: load type: ${type}`); + // Load the tone configuration of the specified type. + await tonePlayerPromise.load(type); + console.info('testTonePlayerPromise: load-success'); + console.info(`testTonePlayerPromise: start type: ${type}`); + // Start DTMF tone playing. + await tonePlayerPromise.start(); + console.info('testTonePlayerPromise: start-success'); + console.info(`testTonePlayerPromise: stop type: ${type}`); + setTimeout(async()=>{ + // Stop the tone that is being played. + await tonePlayerPromise.stop(); + console.info('testTonePlayerPromise: stop-success'); + console.info(`testTonePlayerPromise: release type: ${type}`); + // Release the resources associated with the TonePlayer instance. + await tonePlayerPromise.release(); + console.info('testTonePlayerPromise: release-success'); + }, 30) + } catch(err) { + console.error(`testTonePlayerPromise err : ${err}`); + } + }, 200) + }; + async testTonePlayer() { + this.testTonePlayerPromise(audio.ToneType.TONE_TYPE_DIAL_0); + } +} +``` diff --git a/en/application-dev/media/video-playback.md b/en/application-dev/media/video-playback.md index d4c895b452aa31b28690bd96bd9ef0fac64c4eb4..fff4aa830ddc45e7d20e0fd06655adfdc5243fe5 100644 --- a/en/application-dev/media/video-playback.md +++ b/en/application-dev/media/video-playback.md @@ -1,419 +1,178 @@ -# Video Playback Development +# Video Playback -## Introduction - -You can use video playback APIs to convert audio data into audible analog signals and play the signals using output devices. You can also manage playback tasks. For example, you can start, suspend, stop playback, release resources, set the volume, seek to a playback position, set the playback speed, and obtain track information. 
This document describes development for the following video playback scenarios: full-process, normal playback, video switching, and loop playback. - -## Working Principles - -The following figures show the video playback state transition and the interaction with external modules for video playback. - -**Figure 1** Video playback state transition - -![en-us_image_video_state_machine](figures/en-us_image_video_state_machine.png) - -**Figure 2** Interaction with external modules for video playback - -![en-us_image_video_player](figures/en-us_image_video_player.png) - -**NOTE**: When a third-party application calls a JS interface provided by the JS interface layer, the framework layer invokes the audio component through the media service of the native framework to output the audio data decoded by the software to the audio HDI. The graphics subsystem outputs the image data decoded by the codec HDI at the hardware interface layer to the display HDI. In this way, video playback is implemented. - -*Note: Video playback requires hardware capabilities such as display, audio, and codec.* - -1. A third-party application obtains a surface ID from the XComponent. -2. The third-party application transfers the surface ID to the VideoPlayer JS. -3. The media service flushes the frame data to the surface buffer. - -## Compatibility - -Use the mainstream playback formats and resolutions, rather than custom ones to avoid playback failures, frame freezing, and artifacts. The system is not affected by incompatibility issues. If such an issue occurs, you can exit stream playback mode. - -The table below lists the mainstream playback formats and resolutions. 
- -| Video Container Format| Description | Resolution | -| :----------: | :-----------------------------------------------: | :--------------------------------: | -| mp4 | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| mkv | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| ts | Video format: H.264/MPEG-2/MPEG-4; audio format: AAC/MP3 | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| -| webm | Video format: VP8; audio format: VORBIS | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p| - -## How to Develop - -For details about the APIs, see [VideoPlayer in the Media API](../reference/apis/js-apis-media.md#videoplayer8). - -### Full-Process Scenario - -The full video playback process includes creating an instance, setting the URL, setting the surface ID, preparing for video playback, playing video, pausing playback, obtaining track information, seeking to a playback position, setting the volume, setting the playback speed, stopping playback, resetting the playback configuration, and releasing resources. - -For details about the **url** types supported by **VideoPlayer**, see the [url attribute](../reference/apis/js-apis-media.md#videoplayer_attributes). - -For details about how to create an XComponent, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md). - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' -export class VideoPlayerDemo { - // Report an error in the case of a function invocation failure. - failureCallback(error) { - console.info(`error happened,error Name is ${error.name}`); - console.info(`error happened,error Code is ${error.code}`); - console.info(`error happened,error Message is ${error.message}`); - } - - // Report an error in the case of a function invocation exception. 
- catchCallback(error) { - console.info(`catch error happened,error Name is ${error.name}`); - console.info(`catch error happened,error Code is ${error.code}`); - console.info(`catch error happened,error Message is ${error.message}`); - } - - // Used to print the video track information. - printfDescription(obj) { - for (let item in obj) { - let property = obj[item]; - console.info('key is ' + item); - console.info('value is ' + property); - } - } - - async videoPlayerDemo() { - let videoPlayer = undefined; - let surfaceID = 'test' // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. For details about the document link, see the method of creating the XComponent. - let fdPath = 'fd://' - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile" command. - let path = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/H264_AAC.mp4'; - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - // Call createVideoPlayer to create a VideoPlayer instance. - await media.createVideoPlayer().then((video) => { - if (typeof (video) != 'undefined') { - console.info('createVideoPlayer success!'); - videoPlayer = video; - } else { - console.info('createVideoPlayer fail!'); +OpenHarmony provides two solutions for video playback development: + +- [AVPlayer](using-avplayer-for-playback.md) class: provides ArkTS and JS APIs to implement audio and video playback. It also supports parsing streaming media and local assets, decapsulating media assets, decoding video, and rendering video. It is applicable to end-to-end playback of media assets and can be used to play video files in MP4 and MKV formats. 
+
+- **\<Video\>** component: encapsulates basic video playback capabilities. It can be used to play video files after the data source and basic information are set. However, its scalability is poor. This component is provided by ArkUI. For details about how to use this component for video playback development, see [Video Component](../ui/arkts-common-components-video-player.md).
+
+In this topic, you will learn how to use the AVPlayer to develop a video playback service that plays a complete video file. If you want the application to continue playing the video in the background or when the screen is off, you must use the [AVSession](avsession-overview.md) and [continuous task](../task-management/continuous-task-dev-guide.md) to prevent the playback from being forcibly interrupted by the system.
+
+## Development Guidelines
+
+The full playback process includes creating an **AVPlayer** instance, setting the media asset to play and the window to display the video, setting playback parameters (volume, speed, and scale type), controlling playback (play, pause, seek, and stop), resetting the playback configuration, and releasing the instance. During application development, you can use the **state** attribute of the AVPlayer to obtain the AVPlayer state or call **on('stateChange')** to listen for state changes. If the application performs an operation when the AVPlayer is not in the given state, the system may throw an exception or generate other undefined behavior.
+
+**Figure 1** Playback state transition
+
+![Playback state change](figures/video-playback-status-change.png)
+
+For details about the state, see [AVPlayerState](../reference/apis/js-apis-media.md#avplayerstate9). When the AVPlayer is in the **prepared**, **playing**, **paused**, or **completed** state, the playback engine is working and a large amount of RAM is occupied. If your application does not need to use the AVPlayer, call **reset()** or **release()** to release the instance.
+ +### How to Develop + +Read [AVPlayer](../reference/apis/js-apis-media.md#avplayer9) for the API reference. + +1. Call **createAVPlayer()** to create an **AVPlayer** instance. The AVPlayer is the **idle** state. + +2. Set the events to listen for, which will be used in the full-process scenario. The table below lists the supported events. + | Event Type| Description| + | -------- | -------- | + | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVPlayer.| + | error | Mandatory; used to listen for AVPlayer errors.| + | durationUpdate | Used to listen for progress bar updates to refresh the media asset duration.| + | timeUpdate | Used to listen for the current position of the progress bar to refresh the current time.| + | seekDone | Used to listen for the completion status of the **seek()** request.
This event is reported when the AVPlayer seeks to the playback position specified in **seek()**.| + | speedDone | Used to listen for the completion status of the **setSpeed()** request.
This event is reported when the AVPlayer plays video at the speed specified in **setSpeed()**.| + | volumeChange | Used to listen for the completion status of the **setVolume()** request.
This event is reported when the AVPlayer plays video at the volume specified in **setVolume()**.| + | bitrateDone | Used to listen for the completion status of the **setBitrate()** request, which is used for HTTP Live Streaming (HLS) streams.
This event is reported when the AVPlayer plays video at the bit rate specified in **setBitrate()**.| + | availableBitrates | Used to listen for available bit rates of HLS resources. The available bit rates are provided for **setBitrate()**.| + | bufferingUpdate | Used to listen for network playback buffer information.| + | startRenderFrame | Used to listen for the rendering time of the first frame during video playback.| + | videoSizeChange | Used to listen for the width and height of video playback and adjust the window size and ratio.| + | audioInterrupt | Used to listen for audio interruption. This event is used together with the **audioInterruptMode** attribute.
This event is reported when the current audio playback is interrupted by another (for example, when a call is coming), so the application can process the event in time.| + +3. Set the media asset URL. The AVPlayer enters the **initialized** state. + > **NOTE** + > + > The URL in the code snippet below is for reference only. You need to check the media asset validity and set the URL based on service requirements. + > + > - If local files are used for playback, ensure that the files are available and the application sandbox path is used for access. For details about how to obtain the application sandbox path, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). For details about the application sandbox and how to push files to the application sandbox, see [File Management](../file-management/app-sandbox-directory.md). + > + > - If a network playback path is used, you must request the ohos.permission.INTERNET [permission](../security/accesstoken-guidelines.md). + > + > - You can also use **ResourceManager.getRawFd** to obtain the file descriptor of a file packed in the HAP file. For details, see [ResourceManager API Reference](../reference/apis/js-apis-resource-manager.md#getrawfd9). + > + > - The [playback formats and protocols](avplayer-avrecorder-overview.md#supported-formats-and-protocols) in use must be those supported by the system. + +4. Obtain and set the surface ID of the window to display the video. + The application obtains the surface ID from the XComponent. For details about the process, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md). + +5. Call **prepare()** to switch the AVPlayer to the **prepared** state. In this state, you can obtain the duration of the media asset to play and set the scale type and volume. + +6. Call **play()**, **pause()**, **seek()**, and **stop()** to perform video playback control as required. + +7. 
(Optional) Call **reset()** to reset the AVPlayer. The AVPlayer enters the **idle** state again and you can change the media asset URL. + +8. Call **release()** to switch the AVPlayer to the **released** state. Now your application exits the playback. + + +### Sample Code + + +```ts +import media from '@ohos.multimedia.media'; +import fs from '@ohos.file.fs'; +import common from '@ohos.app.ability.common'; + +export class AVPlayerDemo { + private avPlayer; + private count: number = 0; + private surfaceID: string; // The surfaceID parameter specifies the window used to display the video. Its value is obtained through the XComponent. + + // Set AVPlayer callback functions. + setAVPlayerCallback() { + // Callback function for the seek operation. + this.avPlayer.on('seekDone', (seekDoneTime) => { + console.info(`AVPlayer seek succeeded, seek time is ${seekDoneTime}`); + }) + // Callback function for errors. If an error occurs during the operation on the AVPlayer, reset() is called to reset the AVPlayer. + this.avPlayer.on('error', (err) => { + console.error(`Invoke avPlayer failed, code is ${err.code}, message is ${err.message}`); + this.avPlayer.reset(); // Call reset() to reset the AVPlayer, which enters the idle state. + }) + // Callback function for state changes. + this.avPlayer.on('stateChange', async (state, reason) => { + switch (state) { + case 'idle': // This state is reported upon a successful callback of reset(). + console.info('AVPlayer state idle called.'); + this.avPlayer.release(); // Call release() to release the instance. + break; + case 'initialized': // This state is reported when the AVPlayer sets the playback source. + console.info('AVPlayerstate initialized called.'); + this.avPlayer.surfaceId = this.surfaceID // Set the window to display the video. This setting is not required when a pure audio asset is to be played. 
+ this.avPlayer.prepare().then(() => { + console.info('AVPlayer prepare succeeded.'); + }, (err) => { + console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`); + }); + break; + case 'prepared': // This state is reported upon a successful callback of prepare(). + console.info('AVPlayer state prepared called.'); + this.avPlayer.play(); // Call play() to start playback. + break; + case 'playing': // This state is reported upon a successful callback of play(). + console.info('AVPlayer state playing called.'); + if (this.count !== 0) { + console.info('AVPlayer start to seek.'); + this.avPlayer.seek (this.avPlayer.duration); // Call seek() to seek to the end of the video clip. + } else { + this.avPlayer.pause(); // Call pause() to pause the playback. + } + this.count++; + break; + case 'paused': // This state is reported upon a successful callback of pause(). + console.info('AVPlayer state paused called.'); + this.avPlayer.play(); // Call play() again to start playback. + break; + case 'completed': // This state is reported upon the completion of the playback. + console.info('AVPlayer state completed called.'); + this.avPlayer.stop(); // Call stop() to stop the playback. + break; + case 'stopped': // This state is reported upon a successful callback of stop(). + console.info('AVPlayer state stopped called.'); + this.avPlayer.reset(); // Call reset() to reset the AVPlayer state. + break; + case 'released': + console.info('AVPlayer state released called.'); + break; + default: + console.info('AVPlayer state unknown called.'); + break; } - }, this.failureCallback).catch(this.catchCallback); - // Set the playback source for the player. - videoPlayer.url = fdPath; - - // Set the surface ID to display the video image. - await videoPlayer.setDisplaySurface(surfaceID).then(() => { - console.info('setDisplaySurface success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the prepare API to prepare for playback. 
- await videoPlayer.prepare().then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the play API to start playback. - await videoPlayer.play().then(() => { - console.info('play success'); - }, this.failureCallback).catch(this.catchCallback); - - // Pause playback. - await videoPlayer.pause().then(() => { - console.info('pause success'); - }, this.failureCallback).catch(this.catchCallback); - - // Use a promise to obtain the video track information communication_dsoftbus. - let arrayDescription; - await videoPlayer.getTrackDescription().then((arrlist) => { - if (typeof (arrlist) != 'undefined') { - arrayDescription = arrlist; - } else { - console.log('video getTrackDescription fail'); - } - }, this.failureCallback).catch(this.catchCallback); - - for (let i = 0; i < arrayDescription.length; i++) { - this.printfDescription(arrayDescription[i]); - } - - // Seek to the 50s position. For details about the input parameters, see the API document. - let seekTime = 50000; - await videoPlayer.seek(seekTime, media.SeekMode.SEEK_NEXT_SYNC).then((seekDoneTime) => { - console.info('seek success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the volume. For details about the input parameters, see the API document. - let volume = 0.5; - await videoPlayer.setVolume(volume).then(() => { - console.info('setVolume success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the playback speed. For details about the input parameters, see the API document. - let speed = media.PlaybackSpeed.SPEED_FORWARD_2_00_X; - await videoPlayer.setSpeed(speed).then(() => { - console.info('setSpeed success'); - }, this.failureCallback).catch(this.catchCallback); - - // Stop playback. - await videoPlayer.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Reset the playback configuration. 
- await videoPlayer.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Release playback resources. - await videoPlayer.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the related instances to undefined. - videoPlayer = undefined; - surfaceID = undefined; - } -} -``` - -### Normal Playback Scenario - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' -export class VideoPlayerDemo { - // Report an error in the case of a function invocation failure. - failureCallback(error) { - console.info(`error happened,error Name is ${error.name}`); - console.info(`error happened,error Code is ${error.code}`); - console.info(`error happened,error Message is ${error.message}`); - } - - // Report an error in the case of a function invocation exception. - catchCallback(error) { - console.info(`catch error happened,error Name is ${error.name}`); - console.info(`catch error happened,error Code is ${error.code}`); - console.info(`catch error happened,error Message is ${error.message}`); - } - - // Used to print the video track information. - printfDescription(obj) { - for (let item in obj) { - let property = obj[item]; - console.info('key is ' + item); - console.info('value is ' + property); - } - } - - async videoPlayerDemo() { - let videoPlayer = undefined; - let surfaceID = 'test' // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. For details about the document link, see the method of creating the XComponent. - let fdPath = 'fd://' - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile" command. 
- let path = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/H264_AAC.mp4'; + }) + } + + // The following demo shows how to use the file system to open the sandbox address, obtain the media file address, and play the media file using the URL attribute. + async avPlayerUrlDemo() { + // Create an AVPlayer instance. + this.avPlayer = await media.createAVPlayer(); + // Set a callback function for state changes. + this.setAVPlayerCallback(); + let fdPath = 'fd://'; + let context = getContext(this) as common.UIAbilityContext; + // Obtain the sandbox address filesDir through UIAbilityContext. The stage model is used as an example. + let pathDir = context.filesDir; + let path = pathDir + '/H264_AAC.mp4'; + // Open the corresponding file address to obtain the file descriptor and assign a value to the URL to trigger the reporting of the initialized state. let file = await fs.open(path); fdPath = fdPath + '' + file.fd; - // Call createVideoPlayer to create a VideoPlayer instance. - await media.createVideoPlayer().then((video) => { - if (typeof (video) != 'undefined') { - console.info('createVideoPlayer success!'); - videoPlayer = video; - } else { - console.info('createVideoPlayer fail!'); - } - }, this.failureCallback).catch(this.catchCallback); - // Set the playback source for the player. - videoPlayer.url = fdPath; - - // Set the surface ID to display the video image. - await videoPlayer.setDisplaySurface(surfaceID).then(() => { - console.info('setDisplaySurface success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the prepare API to prepare for playback. - await videoPlayer.prepare().then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the play API to start playback. 
- await videoPlayer.play().then(() => { - console.info('play success'); - }, this.failureCallback).catch(this.catchCallback); - - // Stop playback. - await videoPlayer.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Release playback resources. - await videoPlayer.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the related instances to undefined. - videoPlayer = undefined; - surfaceID = undefined; + this.avPlayer.url = fdPath; + } + + // The following demo shows how to use resourceManager to obtain the media file packed in the HAP file and play the media file by using the fdSrc attribute. + async avPlayerFdSrcDemo() { + // Create an AVPlayer instance. + this.avPlayer = await media.createAVPlayer(); + // Set a callback function for state changes. + this.setAVPlayerCallback(); + // Call getRawFd of the resourceManager member of UIAbilityContext to obtain the media asset URL. + // The return type is {fd,offset,length}, where fd indicates the file descriptor address of the HAP file, offset indicates the media asset offset, and length indicates the duration of the media asset to play. + let context = getContext(this) as common.UIAbilityContext; + let fileDescriptor = await context.resourceManager.getRawFd('H264_AAC.mp4'); + // Assign a value to fdSrc to trigger the reporting of the initialized state. + this.avPlayer.fdSrc = fileDescriptor; } } ``` -### Switching to the Next Video Clip - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' -export class VideoPlayerDemo { - // Report an error in the case of a function invocation failure. 
- failureCallback(error) { - console.info(`error happened,error Name is ${error.name}`); - console.info(`error happened,error Code is ${error.code}`); - console.info(`error happened,error Message is ${error.message}`); - } - - // Report an error in the case of a function invocation exception. - catchCallback(error) { - console.info(`catch error happened,error Name is ${error.name}`); - console.info(`catch error happened,error Code is ${error.code}`); - console.info(`catch error happened,error Message is ${error.message}`); - } - - // Used to print the video track information. - printfDescription(obj) { - for (let item in obj) { - let property = obj[item]; - console.info('key is ' + item); - console.info('value is ' + property); - } - } - - async videoPlayerDemo() { - let videoPlayer = undefined; - let surfaceID = 'test' // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. For details about the document link, see the method of creating the XComponent. - let fdPath = 'fd://' - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile" command. - let path = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/H264_AAC.mp4'; - let nextPath = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/MP4_AAC.mp4'; - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - // Call createVideoPlayer to create a VideoPlayer instance. 
- await media.createVideoPlayer().then((video) => { - if (typeof (video) != 'undefined') { - console.info('createVideoPlayer success!'); - videoPlayer = video; - } else { - console.info('createVideoPlayer fail!'); - } - }, this.failureCallback).catch(this.catchCallback); - // Set the playback source for the player. - videoPlayer.url = fdPath; - - // Set the surface ID to display the video image. - await videoPlayer.setDisplaySurface(surfaceID).then(() => { - console.info('setDisplaySurface success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the prepare API to prepare for playback. - await videoPlayer.prepare().then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the play API to start playback. - await videoPlayer.play().then(() => { - console.info('play success'); - }, this.failureCallback).catch(this.catchCallback); - - // Reset the playback configuration. - await videoPlayer.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Obtain the next video FD address. - fdPath = 'fd://' - let nextFile = await fs.open(nextPath); - fdPath = fdPath + '' + nextFile.fd; - // Set the second video playback source. - videoPlayer.url = fdPath; - - // Call the prepare API to prepare for playback. - await videoPlayer.prepare().then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the play API to start playback. - await videoPlayer.play().then(() => { - console.info('play success'); - }, this.failureCallback).catch(this.catchCallback); - - // Release playback resources. - await videoPlayer.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the related instances to undefined. 
- videoPlayer = undefined; - surfaceID = undefined; - } -} -``` - -### Looping a Video Clip - -```js -import media from '@ohos.multimedia.media' -import fs from '@ohos.file.fs' -export class VideoPlayerDemo { - // Report an error in the case of a function invocation failure. - failureCallback(error) { - console.info(`error happened,error Name is ${error.name}`); - console.info(`error happened,error Code is ${error.code}`); - console.info(`error happened,error Message is ${error.message}`); - } - - // Report an error in the case of a function invocation exception. - catchCallback(error) { - console.info(`catch error happened,error Name is ${error.name}`); - console.info(`catch error happened,error Code is ${error.code}`); - console.info(`catch error happened,error Message is ${error.message}`); - } - - // Used to print the video track information. - printfDescription(obj) { - for (let item in obj) { - let property = obj[item]; - console.info('key is ' + item); - console.info('value is ' + property); - } - } - - async videoPlayerDemo() { - let videoPlayer = undefined; - let surfaceID = 'test' // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. For details about the document link, see the method of creating the XComponent. - let fdPath = 'fd://' - // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile" command. - let path = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/H264_AAC.mp4'; - let file = await fs.open(path); - fdPath = fdPath + '' + file.fd; - // Call createVideoPlayer to create a VideoPlayer instance. 
- await media.createVideoPlayer().then((video) => { - if (typeof (video) != 'undefined') { - console.info('createVideoPlayer success!'); - videoPlayer = video; - } else { - console.info('createVideoPlayer fail!'); - } - }, this.failureCallback).catch(this.catchCallback); - // Set the playback source for the player. - videoPlayer.url = fdPath; - - // Set the surface ID to display the video image. - await videoPlayer.setDisplaySurface(surfaceID).then(() => { - console.info('setDisplaySurface success'); - }, this.failureCallback).catch(this.catchCallback); - - // Call the prepare API to prepare for playback. - await videoPlayer.prepare().then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - // Set the loop playback attribute. - videoPlayer.loop = true; - // Call the play API to start loop playback. - await videoPlayer.play().then(() => { - console.info('play success, loop value is ' + videoPlayer.loop); - }, this.failureCallback).catch(this.catchCallback); - } -} -``` + \ No newline at end of file diff --git a/en/application-dev/media/video-recorder.md b/en/application-dev/media/video-recorder.md deleted file mode 100644 index fd9de91b4bae0591e2a5dc4869455bdd4055943e..0000000000000000000000000000000000000000 --- a/en/application-dev/media/video-recorder.md +++ /dev/null @@ -1,160 +0,0 @@ -# Video Recording Development - -## Introduction - -You can use video recording APIs to capture audio and video signals, encode them, and save them to files. You can start, suspend, resume, and stop recording, and release resources. You can also specify parameters such as the encoding format, encapsulation format, and file path for video recording. - -## Working Principles - -The following figures show the video recording state transition and the interaction with external modules for video recording. 
- -**Figure 1** Video recording state transition - -![en-us_image_video_recorder_state_machine](figures/en-us_image_video_recorder_state_machine.png) - -**Figure 2** Interaction with external modules for video recording - -![en-us_image_video_recorder_zero](figures/en-us_image_video_recorder_zero.png) - -**NOTE**: When a third-party camera application or system camera calls a JS interface provided by the JS interface layer, the framework layer uses the media service of the native framework to invoke the audio component. Through the audio HDI, the audio component captures audio data, encodes the audio data through software, and saves the encoded audio data to a file. The graphics subsystem captures image data through the video HDI, encodes the image data through the video codec HDI, and saves the encoded image data to a file. In this way, video recording is implemented. - -## Constraints - -Before developing video recording, configure the permissions **ohos.permission.MICROPHONE** and **ohos.permission.CAMERA** for your application. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md). - -## How to Develop - -For details about the APIs, see [VideoRecorder in the Media API](../reference/apis/js-apis-media.md#videorecorder9). - -### Full-Process Scenario - -The full video recording process includes creating an instance, setting recording parameters, starting, pausing, resuming, and stopping recording, and releasing resources. - -```js -import media from '@ohos.multimedia.media' -import mediaLibrary from '@ohos.multimedia.mediaLibrary' -export class VideoRecorderDemo { - private testFdNumber; // Used to save the FD address. - // pathName indicates the passed recording file name, for example, 01.mp4. The generated file address is /storage/media/100/local/files/Video/01.mp4. 
- // To use the media library, declare the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA. - async getFd(pathName) { - let displayName = pathName; - const mediaTest = mediaLibrary.getMediaLibrary(); - let fileKeyObj = mediaLibrary.FileKey; - let mediaType = mediaLibrary.MediaType.VIDEO; - let publicPath = await mediaTest.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO); - let dataUri = await mediaTest.createAsset(mediaType, displayName, publicPath); - if (dataUri != undefined) { - let args = dataUri.id.toString(); - let fetchOp = { - selections : fileKeyObj.ID + "=?", - selectionArgs : [args], - } - let fetchFileResult = await mediaTest.getFileAssets(fetchOp); - let fileAsset = await fetchFileResult.getAllObject(); - let fdNumber = await fileAsset[0].open('Rw'); - this.testFdNumber = "fd://" + fdNumber.toString(); - } - } - - // Error callback triggered in the case of an error - failureCallback(error) { - console.info('error happened, error name is ' + error.name); - console.info('error happened, error code is ' + error.code); - console.info('error happened, error message is ' + error.message); - } - - // Error callback triggered in the case of an exception - catchCallback(error) { - console.info('catch error happened, error name is ' + error.name); - console.info('catch error happened, error code is ' + error.code); - console.info('catch error happened, error message is ' + error.message); - } - - async videoRecorderDemo() { - let videoRecorder = null; // videoRecorder is an empty object and assigned with a value after createVideoRecorder is successfully called. - let surfaceID = null; // Used to save the surface ID returned by getInputSurface. - // Obtain the FD address of the video to be recorded. - await this.getFd('01.mp4'); - // Configure the parameters related to video recording based on those supported by the hardware device. 
- let videoProfile = { - audioBitrate : 48000, - audioChannels : 2, - audioCodec : 'audio/mp4a-latm', - audioSampleRate : 48000, - fileFormat : 'mp4', - videoBitrate : 2000000, - videoCodec : 'video/mp4v-es', - videoFrameWidth : 640, - videoFrameHeight : 480, - videoFrameRate : 30 - } - - let videoConfig = { - audioSourceType : 1, - videoSourceType : 0, - profile : videoProfile, - url : this.testFdNumber, // testFdNumber is generated by getFd. - orientationHint : 0, - location : { latitude : 30, longitude : 130 } - } - // Create a VideoRecorder object. - await media.createVideoRecorder().then((recorder) => { - console.info('case createVideoRecorder called'); - if (typeof (recorder) != 'undefined') { - videoRecorder = recorder; - console.info('createVideoRecorder success'); - } else { - console.info('createVideoRecorder failed'); - } - }, this.failureCallback).catch(this.catchCallback); - - // Call the prepare API to prepare for video recording. - await videoRecorder.prepare(videoConfig).then(() => { - console.info('prepare success'); - }, this.failureCallback).catch(this.catchCallback); - - // Obtain the surface ID, save it, and pass it to camera-related APIs. - await videoRecorder.getInputSurface().then((surface) => { - console.info('getInputSurface success'); - surfaceID = surface; - }, this.failureCallback).catch(this.catchCallback); - - // Video recording depends on camera-related APIs. The following operations can be performed only after the video output start API is invoked. For details about how to call the camera APIs, see the samples. - // Start video recording. - await videoRecorder.start().then(() => { - console.info('start success'); - }, this.failureCallback).catch(this.catchCallback); - - // Pause video recording before the video output stop API of the camera is invoked. 
- await videoRecorder.pause().then(() => { - console.info('pause success'); - }, this.failureCallback).catch(this.catchCallback); - - // Resume video recording after the video output start API of the camera is invoked. - await videoRecorder.resume().then(() => { - console.info('resume success'); - }, this.failureCallback).catch(this.catchCallback); - - // Stop video recording after the video output stop API of the camera is invoked. - await videoRecorder.stop().then(() => { - console.info('stop success'); - }, this.failureCallback).catch(this.catchCallback); - - // Reset the recording configuration. - await videoRecorder.reset().then(() => { - console.info('reset success'); - }, this.failureCallback).catch(this.catchCallback); - - // Release the video recording resources and camera object resources. - await videoRecorder.release().then(() => { - console.info('release success'); - }, this.failureCallback).catch(this.catchCallback); - - // Set the related object to null. - videoRecorder = undefined; - surfaceID = undefined; - } -} -``` - diff --git a/en/application-dev/media/video-recording.md b/en/application-dev/media/video-recording.md new file mode 100644 index 0000000000000000000000000000000000000000..8eabb4e1aad61f954135832ff2e5439912acdb34 --- /dev/null +++ b/en/application-dev/media/video-recording.md @@ -0,0 +1,237 @@ +# Video Recording + +OpenHarmony provides the AVRecorder for you to develop the video recording service. The AVRecorder supports audio recording, audio encoding, video encoding, audio encapsulation, and video encapsulation. It is applicable to simple video recording scenarios and can be used to generate local video files directly. + +You will learn how to use the AVRecorder to complete the process of starting, pausing, resuming, and stopping recording. + +During application development, you can use the **state** attribute of the AVRecorder to obtain the AVRecorder state or call **on('stateChange')** to listen for state changes. 
Your code must meet the state machine requirements. For example, **pause()** is called only when the AVRecorder is in the **started** state, and **resume()** is called only when it is in the **paused** state. + +**Figure 1** Recording state transition + +![Recording state change](figures/video-recording-status-change.png) + +For details about the state, see [AVRecorderState](../reference/apis/js-apis-media.md#avrecorderstate9). + +## How to Develop + +> **NOTE** +> +> The AVRecorder only processes video data. To complete video recording, it must work with the video data collection module, which transfers the captured video data to the AVRecorder for data processing through the surface. A typical video data collection module is the camera module, which currently is available only to system applications. For details, see [Camera](../reference/apis/js-apis-camera.md). + +Read [AVRecorder](../reference/apis/js-apis-media.md#avrecorder9) for the API reference. + +1. Create an **AVRecorder** instance. The AVRecorder is the **idle** state. + + ```ts + import media from '@ohos.multimedia.media' + let avRecorder + media.createAVRecorder().then((recorder) => { + avRecorder = recorder + }, (error) => { + console.error('createAVRecorder failed') + }) + ``` + +2. Set the events to listen for. + | Event Type| Description| + | -------- | -------- | + | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVRecorder.| + | error | Mandatory; used to listen for AVRecorder errors.| + + ```ts + // Callback function for state changes. + avRecorder.on('stateChange', (state, reason) => { + console.info('current state is: ' + state); + }) + // Callback function for errors. + avRecorder.on('error', (err) => { + console.error('error happened, error message is ' + err); + }) + ``` + +3. Set video recording parameters and call **prepare()**. The AVRecorder enters the **prepared** state. 
+ > **NOTE** + > + > Pay attention to the following when configuring parameters: + > + > - In pure video recording scenarios, set only video-related parameters in **avConfig** of **prepare()**. + > If audio-related parameters are configured, the system regards it as audio and video recording. + > + > - The [recording specifications](avplayer-avrecorder-overview.md#supported-formats) in use must be those supported. The video bit rate, resolution, and frame rate are subject to the ranges supported by the hardware device. + > + > - The recording output URL (URL in **avConfig** in the sample code) must be in the format of fd://xx (where xx indicates a file descriptor). You must call [ohos.file.fs](../reference/apis/js-apis-file-fs.md) to implement access to the application file. For details, see [Application File Access and Management](../file-management/app-file-access.md). + + ```ts + let avProfile = { + fileFormat: media.ContainerFormatType.CFT_MPEG_4, // Video file encapsulation format. Only MP4 is supported. + videoBitrate: 200000, // Video bit rate. + videoCodec: media.CodecMimeType.VIDEO_AVC, // Video file encoding format. Both MPEG-4 and AVC are supported. + videoFrameWidth: 640, // Video frame width. + videoFrameHeight: 480, // Video frame height. + videoFrameRate: 30 // Video frame rate. + } + let avConfig = { + videoSourceType: media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, // Video source type. YUV and ES are supported. + profile : this.avProfile, + url: 'fd://35', // Create, read, and write a file by referring to the sample code in Application File Access and Management. + rotation: 0, // Video rotation angle. The default value is 0, indicating that the video is not rotated. The value can be 0, 90, 180, or 270. + } + avRecorder.prepare(avConfig).then(() => { + console.info('avRecorder prepare success') + }, (error) => { + console.error('avRecorder prepare failed') + }) + ``` + +4. Obtain the surface ID required for video recording. 
+ + Call **getInputSurface()**. The returned surface ID is transferred to the video data collection module (video input source), which is the camera module in the sample code. + + The video data collection module obtains the surface based on the surface ID and transmits video data to the AVRecorder through the surface. Then the AVRecorder processes the video data. + + ```ts + avRecorder.getInputSurface().then((surfaceId) => { + console.info('avRecorder getInputSurface success') + }, (error) => { + console.error('avRecorder getInputSurface failed') + }) + ``` + +5. Initialize the video data input source. + + This step is performed in the video data collection module. For the camera module, you need to create a **Camera** instance, obtain the camera list, create a camera input stream, and create a video output stream. For details, see [Recording](camera-recording-case.md). + +6. Start recording. + + Start the input source to input video data, for example, by calling **camera.VideoOutput.start**. Then call **AVRecorder.start()** to switch the AVRecorder to the **started** state. + +7. Call **pause()** to pause recording. The AVRecorder enters the **paused** state. In addition, pause data input in the video data collection module, for example, by calling **camera.VideoOutput.stop**. + +8. Call **resume()** to resume recording. The AVRecorder enters the **started** state again. + +9. Call **stop()** to stop recording. The AVRecorder enters the **stopped** state again. In addition, stop camera recording in the video data collection module. + +10. Call **reset()** to reset the resources. The AVRecorder enters the **idle** state. In this case, you can reconfigure the recording parameters. + +11. Call **release()** to release the resources. The AVRecorder enters the **released** state. In addition, release the video data input source resources (camera resources in this example). 
+ + ## Sample Code + + Refer to the sample code below to complete the process of starting, pausing, resuming, and stopping recording. + + +```ts +import media from '@ohos.multimedia.media' +const TAG = 'VideoRecorderDemo:' +export class VideoRecorderDemo { + private avRecorder; + private videoOutSurfaceId; + private avProfile = { + fileFormat: media.ContainerFormatType.CFT_MPEG_4, // Video file encapsulation format. Only MP4 is supported. + videoBitrate : 100000, // Video bit rate. + videoCodec: media.CodecMimeType.VIDEO_AVC, // Video file encoding format. Both MPEG-4 and AVC are supported. + videoFrameWidth: 640, // Video frame width. + videoFrameHeight: 480, // Video frame height. + videoFrameRate: 30 // Video frame rate. + } + private avConfig = { + videoSourceType: media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV, // Video source type. YUV and ES are supported. + profile : this.avProfile, + url: 'fd://35', // Create, read, and write a file by referring to the sample code in Application File Access and Management. + rotation: 0, // Video rotation angle. The default value is 0, indicating that the video is not rotated. The value can be 0, 90, 180, or 270. + } + + // Set AVRecorder callback functions. + setAvRecorderCallback() { + // Callback function for state changes. + this.avRecorder.on('stateChange', (state, reason) => { + console.info(TAG + 'current state is: ' + state); + }) + // Callback function for errors. + this.avRecorder.on('error', (err) => { + console.error(TAG + 'error occurred, error message is ' + err); + }) + } + + // Complete camera-related preparations. + async prepareCamera() { + // For details on the implementation, see the camera document. + } + + // Start the camera stream output. + async startCameraOutput() { + // Call start of the VideoOutput class to start video output. + } + + // Stop the camera stream output. + async stopCameraOutput() { + // Call stop of the VideoOutput class to stop video output. 
+ } + + // Release the camera instance. + async releaseCamera() { + // Release the instances created during camera preparation. + } + + // Process of starting recording. + async startRecordingProcess() { + // 1. Create an AVRecorder instance. + this.avRecorder = await media.createAVRecorder(); + this.setAvRecorderCallback(); + // 2. Obtain the file descriptor of the recorded file. The obtained file descriptor is passed in to the URL in avConfig. The implementation is omitted here. + // 3. Set recording parameters to complete the preparations. + await this.avRecorder.prepare(this.avConfig); + this.videoOutSurfaceId = await this.avRecorder.getInputSurface(); + // 4. Complete camera-related preparations. + await this.prepareCamera(); + // 5. Start the camera stream output. + await this.startCameraOutput(); + // 6. Start recording. + await this.avRecorder.start(); + } + + // Process of pausing recording. + async pauseRecordingProcess() { + if (this.avRecorder.state === 'started') { // pause() can be called only when the AVRecorder is in the started state. + await this.avRecorder.pause(); + await this.stopCameraOutput(); // Stop the camera stream output. + } + } + + // Process of resuming recording. + async resumeRecordingProcess() { + if (this.avRecorder.state === 'paused') { // resume() can be called only when the AVRecorder is in the paused state. + await this.startCameraOutput(); // Start camera stream output. + await this.avRecorder.resume(); + } + } + + async stopRecordingProcess() { + // 1. Stop recording. + if (this.avRecorder.state === 'started' + || this.avRecorder.state === 'paused') { // stop() can be called only when the AVRecorder is in the started or paused state. + await this.avRecorder.stop(); + await this.stopCameraOutput(); + } + // 2. Reset the AVRecorder. + await this.avRecorder.reset(); + // 3. Release the AVRecorder instance. + await this.avRecorder.release(); + // 4. After the file is recorded, close the file descriptor. 
The implementation is omitted here. + // 5. Release the camera instance. + await this.releaseCamera(); + } + + // Complete sample code for starting, pausing, resuming, and stopping recording. + async videoRecorderDemo() { + await this.startRecordingProcess(); // Start recording. + // You can set the recording duration. For example, you can set the sleep mode to prevent code execution. + await this.pauseRecordingProcess(); // Pause recording. + await this.resumeRecordingProcess(); // Resume recording. + await this.stopRecordingProcess(); // Stop recording. + } +} +``` + + \ No newline at end of file diff --git a/en/application-dev/media/volume-management.md b/en/application-dev/media/volume-management.md new file mode 100644 index 0000000000000000000000000000000000000000..f6461d968856c7d865c999ab9c604e5ef718548b --- /dev/null +++ b/en/application-dev/media/volume-management.md @@ -0,0 +1,48 @@ +# Volume Management + +You can use different APIs to manage the system volume and audio stream volume. The system volume and audio stream volume refer to the volume of a OpenHarmony device and the volume of a specified audio stream, respectively. The audio stream volume is restricted by the system volume. + +## System Volume + +The API for managing the system volume is **AudioVolumeManager**. Before using this API, you must call **getVolumeManager()** to obtain an **AudioVolumeManager** instance. Currently, this API can be used to obtain volume information and listen for volume changes. It cannot be used to adjust the system volume. + +```ts +import audio from '@ohos.multimedia.audio'; +let audioManager = audio.getAudioManager(); +let audioVolumeManager = audioManager.getVolumeManager(); +``` + +### Listening for System Volume Changes + +You can set an event to listen for system volume changes. 
+ +```ts +audioVolumeManager.on('volumeChange', (volumeEvent) => { + console.info(`VolumeType of stream: ${volumeEvent.volumeType} `); + console.info(`Volume level: ${volumeEvent.volume} `); + console.info(`Whether to updateUI: ${volumeEvent.updateUi} `); +}); +``` + +### Adjusting the System Volume (for System Applications Only) + +Currently, the system volume is mainly adjusted by using system APIs, which serve the physical volume button and the Settings application. When the user presses the volume button, a system API is called to adjust the system volume, including the volume for media, ringtone, or notification. + +## Audio Stream Volume + +The API for managing the audio stream volume is **setVolume()** in the **AVPlayer** or **AudioRenderer** class. The code snippet below is used for setting the audio stream volume by using the **AVPlayer** class: + +```ts +let volume = 1.0 // Specified volume. The value range is [0.00-1.00]. The value 1 indicates the maximum volume. +avPlayer.setVolume(volume) +``` + +The code snippet below is used for setting the audio stream volume by using the **AudioRenderer** class: + +```ts +audioRenderer.setVolume(0.5).then(data=>{ // The volume range is [0.0-1.0]. + console.info('Invoke setVolume succeeded.'); +}).catch((err) => { + console.error(`Invoke setVolume failed, code is ${err.code}, message is ${err.message}`); +}); +``` diff --git a/en/application-dev/napi/Readme-EN.md b/en/application-dev/napi/Readme-EN.md index b7e5367f1697800cafe3094c5a5a8f2cdb56677e..c6bdba99546cafb30a88b9b39c3cd552dfd4419e 100644 --- a/en/application-dev/napi/Readme-EN.md +++ b/en/application-dev/napi/Readme-EN.md @@ -1,6 +1,65 @@ # Native APIs +Native APIs are a set of native development interfaces and tools provided by the OHOS SDK. It enables the use of C or C++ code to implement key application functionalities. 
Native APIs provide part of basic underlying capabilities of OHOS, such as libc, graphics library, window system, multimedia, and compression library. They do not provide complete OHOS platform capabilities as JS APIs do. Native APIs are compiled into a dynamic library before being packed into the application. +## Native API Composition + +### Native API Directory Structure + +Native APIs are stored in the **$(SDK_ROOT)/native** directory of the SDK. They consist of the following parts: + +|Directory|Description| +|--|--| +|build|Used to build the toolchain.cmake script of the dynamic library in the application. The **ohos.toolchain.cmake** file in this directory defines OHOS cross compilation options.| +|build-tools|Stores build tools, such as CMake.| +|docs|Stores Native API reference documents, which are extracted from the header files using Doxygen.| +|llvm|Stores LLVM, a cross compiler that supports OHOS ABIs.| +|sysroot|Stores dependent files of build links, including header files and dynamic libraries.| + +### Native APIs + +|Category|Function|Introduced In| +|--|--|--| +|C standard library|C standard library interfaces based on musl. Currently, more than 1500 interfaces are provided.|API version 8| +|C++ standard library|C++ runtime library libc++_shared. This library must be packed or statically linked to the application during packing.|API version 8| +|Log|HiLog interfaces for printing logs to the system.|API version 8| +|napi|A group of Node-APIs provided by ArkUI to facilitate access to the JS application environment during application development. 
Node-APIs are part of native APIs.|API version 8| +|XComponent|Provides surface and touchscreen event interfaces for developing high-performance graphics applications.|API version 8| +|libuv|Third-party asynchronous I/O library integrated by ArkUI.|API version 8| +|libz|zlib library that provides basic compression and decompression interfaces.|API version 8| +|Drawing|2D graphics library that can be used for drawing on the surface.|API version 8| +|OpenGL|OpenGL 3.0 interfaces.|API version 8| +|Rawfile|Application resource access interfaces that can be used to read various resources packed in the application.|API version 8| +|OpenSLES|Interface library used for 2D and 3D audio acceleration.|API version 8| +|Mindspore|AI model interface library.|API version 9| +|Bundle management|Bundle service interfaces that can be used to query bundle information of the application.|API version 8| + +Some native APIs use open source standards. For details, see [Native Standard Libraries Supported by OpenHarmony](https://docs.openharmony.cn/pages/v3.1/en/application-dev/reference/native-lib/third_party_libc/musl.md/) and [Node_API](https://docs.openharmony.cn/pages/v3.1/en/application-dev/reference/native-lib/third_party_napi/napi.md/). + +## Usage Guidelines + +### Scenarios Where Native APIs Are Recommended + +You can use native APIs when you want to: + +1. Develop performance-sensitive code in computing-intensive scenarios such as gaming and physical simulation. +2. Reuse the existing C or C++ library. +3. Customize libraries related to CPU features, such as neon acceleration. + +### Scenarios Where Native APIs Are Not Recommended + +You do not need to use native APIs when you want to: + +1. Write a native OHOS application. +2. Develop an application compatible on as many OHOS devices as possible. 
+ +# Native API References + +- [Native API hello world]() + + This sample shows how to develop a hello native API library, which can display strings obtained from the hello library on the TS page. - [Using Native APIs in Application Projects](napi-guidelines.md) + + This document describes how to use native APIs to interact with modules, interfaces, and asynchronous tasks in JS. - [Drawing Development](drawing-guidelines.md) - [Raw File Development](rawfile-guidelines.md) - [Native Window Development](native-window-guidelines.md) diff --git a/en/application-dev/napi/drawing-guidelines.md b/en/application-dev/napi/drawing-guidelines.md index a48a081a3e69ea8259727efd343264f80c6cc284..22d85aec0fe405e47cd92abeafa94ba5e7b7ed5f 100644 --- a/en/application-dev/napi/drawing-guidelines.md +++ b/en/application-dev/napi/drawing-guidelines.md @@ -189,7 +189,7 @@ The following steps describe how to use the text drawing and display feature of OH_Drawing_CreateFontCollection()); OH_Drawing_TypographyHandlerPushTextStyle(handler, txtStyle); // Set the text content. 
- const char* text = "OpenHarmony\n"; + const char* text = "Hello World\n"; OH_Drawing_TypographyHandlerAddText(handler, text); OH_Drawing_TypographyHandlerPopTextStyle(handler); OH_Drawing_Typography* typography = OH_Drawing_CreateTypography(handler); diff --git a/en/application-dev/napi/figures/rawfile1.png b/en/application-dev/napi/figures/rawfile1.png new file mode 100644 index 0000000000000000000000000000000000000000..9f29f7875cd983f967b7a3b27b5898bfce76c9f3 Binary files /dev/null and b/en/application-dev/napi/figures/rawfile1.png differ diff --git a/en/application-dev/napi/napi-guidelines.md b/en/application-dev/napi/napi-guidelines.md index 4448869d84d51b0fb17836e69af14ad28433f395..a12e23d9f48492911dff8476a1e5301736704d85 100644 --- a/en/application-dev/napi/napi-guidelines.md +++ b/en/application-dev/napi/napi-guidelines.md @@ -1,645 +1,184 @@ -# Using Native APIs in Application Projects +# Using N-APIs in Application Projects -OpenHarmony applications use JavaScript (JS) when calling native APIs. The native APIs (NAPIs) provided by the [ace_napi](https://gitee.com/openharmony/arkui_napi/tree/master) repository are used to implement interaction with JS. The names of the NAPIs are the same as those in the third-party **Node.js**. For details about the interfaces supported, see **libnapi.ndk.json** in the ace_napi repository. +In OpenHarmony, you can use the N-APIs in C APIs to implement interaction between ArkTS/TS/JS and C/C++. The N-API names are the same as those in the third-party **Node.js**. Currently, OpenHarmony supports some N-APIs. For details about the APIs supported, see [arkui_napi](https://gitee.com/openharmony/arkui_napi/blob/master/libnapi.ndk.json). ## How to Develop -The DevEco Studio has a default project that uses NAPIs. You can choose **File** > **New** > **Create Project** to create a **Native C++** project. The **cpp** directory is generated in the **main** directory. 
You can use the NAPIs provided by the **ace_napi** repository for development. - -You can import the native .so that contains the JS processing logic. For example, **import hello from 'libhello.so'** to use the **libhello.so** capability. Then, the JS object created using the NAPI can be passed to the **hello** object of the application to call the native capability. - -## Development Guidelines - -### Registration - -* Add **static** to the **nm_register_func** function to prevent symbol conflicts with other .so files. -* The name of the module registration entry, that is, the function decorated by **\_\_attribute\_\_((constructor))**, must be unique. - -### .so Naming Rules - -The .so file names must comply with the following rules: - -* Each module has a .so file. -* The **nm_modname** field in **napi_module** must be the same as the module name. For example, if the module name is **hello**, name the .so file **libhello.so**. The sample code for importing the .so file is **import hello from 'libhello.so'**. - -### JS Objects and Threads - -The Ark engine prevents NAPIs from being called to operate JS objects in non-JS threads. Otherwise, the application will crash. Observe the following rules: - -* The NAPIs can be used only in JS threads. -* **env** is bound to a thread and cannot be used across threads. The JS object created by a NAPI can be used only in the thread, in which the object is created, that is, the JS object is bound to the **env** of the thread. - -### Importing Header Files - -Before using NAPI objects and methods, include **napi/native_api.h**. Otherwise, if only the third-party library header file is included, an error will be reporting, indicating that the interface cannot be found. - -### napi_create_async_work - -**napi_create_async_work** has two callbacks: - -* **execute**: processes service logic asynchronously. This callback is not executed by a JS thread; therefore, it cannot call any NAPI. 
The return value of **execute** is processed by the **complete** callback. - -* **complete**: calls the NAPI to encapsulate the return value of **execute** into a JS object and return it for processing. This callback is executed by a JS thread. - -```c++ -napi_status napi_create_async_work(napi_env env, - napi_value async_resource, - napi_value async_resource_name, - napi_async_execute_callback execute, - napi_async_complete_callback complete, - void* data, - napi_async_work* result) -``` +The DevEco Studio provides a default project that uses N-APIs. You can choose **File** > **New** > **Create Project** to create a Native C++ project. After the project is created, the **cpp** directory is generated in the **entry/src/main** directory. You can use the N-APIs to develop C/C++ code (native code). +You can import the native .so file for ArkTS/TS/JS programming. For example, you can **import hello from 'libhello.so'** to use the **libhello.so** capability and pass the ArkTS/TS/JS object named **hello** to the ArkTS/TS/JS APIs of the application. You can use this object to invoke the N-APIs in **cpp**. +## Basic Features +The N-APIs implement interaction between ArkTS/TS/JS and C/C++. The following provides two **HelloWorld** project examples: +1. Define an N-API method **Add()**, which is called by ArkTS with two numbers passed in. The N-API **Add**() method adds the two numbers and returns the result to ArkTS. +2. Define an N-API method named **NativeCallArkTS**, which is called by ArkTS with an ArkTS function passed in. The **NativeCallArkTS** method invokes this ArkTS function and returns the result to ArkTS. -## Encapsulating Synchronous and Asynchronous APIs for the Storage Module +The following describes: +1. How an ArkTS method invokes a C++ method. +2. How a C++ method invokes an ArkTS method. -### Overview +The project has the following files: +- **entry\src\main\cpp\hello.cpp**: contains the N-API logic. 
+- **entry\src\main\ets\pages\index.ets**: contains the ArkTS logic. +- **entry\src\main\cpp\types\libentry\index.d.ts**: contains the declaration of the N-APIs exposed to ArkTS. -This example shows how to encapsulate the synchronous and asynchronous APIs of the **Storage** module. The **Storage** module implements the functions of storing, obtaining, deleting, and clearing data. +The following provides the comments for the files. Other parts in the project are the same as those in the native default project. -### API Declaration - -```typescript -import { AsyncCallback } from './basic'; -declare namespace storage { - function get(key: string, callback: AsyncCallback): void; - function get(key: string, defaultValue: string, callback: AsyncCallback): void; - function get(key: string, defaultValue?: string): Promise; - function set(key: string, value: string, callback: AsyncCallback): void; - function remove(key: string, callback: AsyncCallback): void; - function clear(callback: AsyncCallback): void; - function getSync(key: string, defaultValue?: string): string; - function setSync(key: string, value: string): void; - function removeSync(key: string): void; - function clearClear(): void; -} -export default storage; -``` - - - -### Implementation - -You can obtain the complete code from sample/native_module_storage/ in the [OpenHarmony/arkui_napi](https://gitee.com/openharmony/arkui_napi/tree/master) repository. - -**1. Register the module.** - -Register four synchronous APIs (**getSync**, **setSync**, **removeSync**, and **clearSync**) and four asynchronous APIs (**get**, **set**, **remove**, and **clear**). 
- -```c++ -/*********************************************** - * Module export and register - ***********************************************/ -static napi_value StorageExport(napi_env env, napi_value exports) -{ - napi_property_descriptor desc[] = { - DECLARE_NAPI_FUNCTION("get", JSStorageGet), - DECLARE_NAPI_FUNCTION("set", JSStorageSet), - DECLARE_NAPI_FUNCTION("remove", JSStorageDelete), - DECLARE_NAPI_FUNCTION("clear", JSStorageClear), - - DECLARE_NAPI_FUNCTION("getSync", JSStorageGetSync), - DECLARE_NAPI_FUNCTION("setSync", JSStorageSetSync), - DECLARE_NAPI_FUNCTION("deleteSync", JSStorageDeleteSync), - DECLARE_NAPI_FUNCTION("clearSync", JSStorageClearSync), - }; - NAPI_CALL(env, napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc)); - return exports; -} +```C++ +// entry\src\main\cpp\hello.cpp +// Include the N-API header file. +#include "napi/native_api.h" -// Storage module -static napi_module storage_module = {.nm_version = 1, - .nm_flags = 0, - .nm_filename = nullptr, - .nm_register_func = StorageExport, - .nm_modname = "storage", - .nm_priv = ((void*)0), - .reserved = {0}}; - -// Register the storage module -extern "C" __attribute__((constructor)) void StorageRegister() +// N-API method, which has only two input parameters. You do not need to modify them. +// napi_env is the current running context. +// napi_callback_info contains related information, including parameters passed from ArkTS. +static napi_value Add(napi_env env, napi_callback_info info) { - napi_module_register(&storage_module); -} -``` - -**2. Implement getSync.** - -The **getSync** function registered for the **Storage** module is **JSStorageGetSync**. Obtain data from **gKeyValueStorage**, create a string object, and return the object created. + // Number of parameters to be obtained from ArkTS. napi_value can be regarded as the representation of the ArkTS value in the N-API method. 
+ size_t argc = 2; + napi_value args[2] = {nullptr}; + + // From info(), obtain the parameters passed from ArkTS. In this example, two ArkTS parameters, arg[0] and arg[1], are obtained. + napi_get_cb_info(env, info, &argc, args , nullptr, nullptr); -```c -static napi_value JSStorageGetSync(napi_env env, napi_callback_info info) -{ - GET_PARAMS(env, info, 2); - NAPI_ASSERT(env, argc >= 1, "requires 1 parameter"); - char key[32] = {0}; - size_t keyLen = 0; - char value[128] = {0}; - size_t valueLen = 0; - - // Parse parameters. - for (size_t i = 0; i < argc; i++) { - napi_valuetype valueType; - napi_typeof(env, argv[i], &valueType); - - if (i == 0 && valueType == napi_string) { - napi_get_value_string_utf8(env, argv[i], key, 31, &keyLen); - } else if (i == 1 && valueType == napi_string) { - napi_get_value_string_utf8(env, argv[i], value, 127, &valueLen); - break; - } else { - NAPI_ASSERT(env, false, "type mismatch"); - } - } + // Convert the obtained ArkTS parameters to the type that can be processed by N-API. In this example, the two numbers passed from ArkTS are converted to the double type. + double value0; + napi_get_value_double(env, args[0], &value0); - // Service logic for obtaining data. This example simply obtains data from a global variable. - auto itr = gKeyValueStorage.find(key); - napi_value result = nullptr; - if (itr != gKeyValueStorage.end()) { - // Use the data obtained to create a JS object of the string type. - napi_create_string_utf8(env, itr->second.c_str(), itr->second.length(), &result); - } else if (valueLen > 0) { - // If no data is obtained, use the default value to create a JS object. - napi_create_string_utf8(env, value, valueLen, &result); - } else { - NAPI_ASSERT(env, false, "key does not exist"); - } - // Return the result. - return result; + double value1; + napi_get_value_double(env, args[1], &value1); + + // N-API service logic, which is adding two numbers in this example. 
+ double nativeSum = value0 + value1; + + // Convert the N-API service logic processing result to an ArkTS value and return the value to ArkTS. + napi_value sum; + napi_create_double(env, nativeSum , &sum); + return sum; } -``` - -**3. Implement get().** - -The **get** function registered for the **Storage** module is **JSStorageGet**. -```c -static napi_value JSStorageGet(napi_env env, napi_callback_info info) +static napi_value NativeCallArkTS(napi_env env, napi_callback_info info) { - GET_PARAMS(env, info, 3); - NAPI_ASSERT(env, argc >= 1, "requires 1 parameter"); - - // StorageAsyncContext is a custom class used to store data during execution. - StorageAsyncContext* asyncContext = new StorageAsyncContext(); - - asyncContext->env = env; - - // Obtain parameters. - for (size_t i = 0; i < argc; i++) { - napi_valuetype valueType; - napi_typeof(env, argv[i], &valueType); - - if (i == 0 && valueType == napi_string) { - napi_get_value_string_utf8(env, argv[i], asyncContext->key, 31, &asyncContext->keyLen); - } else if (i == 1 && valueType == napi_string) { - napi_get_value_string_utf8(env, argv[i], asyncContext->value, 127, &asyncContext->valueLen); - } else if (i == 1 && valueType == napi_function) { - napi_create_reference(env, argv[i], 1, &asyncContext->callbackRef); - break; - } else if (i == 2 && valueType == napi_function) { - napi_create_reference(env, argv[i], 1, &asyncContext->callbackRef); - } else { - NAPI_ASSERT(env, false, "type mismatch"); - } - } - + // Number of parameters to be obtained from ArkTS. napi_value can be regarded as the representation of the ArkTS value in the N-API method. + size_t argc = 1; + napi_value args[1] = {nullptr}; + + // From info(), obtain the parameters passed from ArkTS. In this example, one ArkTS parameter, arg[0], is obtained. + napi_get_cb_info(env, info, &argc, args , nullptr, nullptr); + + // Create an ArkTS number as the input parameter of the ArkTS function. 
+ napi_value argv = nullptr; + napi_create_int32(env, 10, &argv); + napi_value result = nullptr; - - // Determine whether promise or callback is used based on the parameters. - if (asyncContext->callbackRef == nullptr) { - // Create a promise. - napi_create_promise(env, &asyncContext->deferred, &result); - } else { - napi_get_undefined(env, &result); - } - - napi_value resource = nullptr; - napi_create_string_utf8(env, "JSStorageGet", NAPI_AUTO_LENGTH, &resource); - - napi_create_async_work( - env, nullptr, resource, - // Callback 1: This callback contains the service logic to be asynchronously executed and is asynchronously executed by the NAPI. Do not operate JS objects using the NAPI because the execution is asynchronous. - [](napi_env env, void* data) { - StorageAsyncContext* asyncContext = (StorageAsyncContext*)data; - auto itr = gKeyValueStorage.find(asyncContext->key); - if (itr != gKeyValueStorage.end()) { - strncpy_s(asyncContext->value, 127, itr->second.c_str(), itr->second.length()); - asyncContext->status = 0; - } else { - asyncContext->status = 1; - } - }, - // Callback 2: This callback is invoked after callback 1 is complete. The JS thread invokes the callback passed in. - [](napi_env env, napi_status status, void* data) { - StorageAsyncContext* asyncContext = (StorageAsyncContext*)data; - napi_value result[2] = {0}; - if (!asyncContext->status) { - napi_get_undefined(env, &result[0]); - napi_create_string_utf8(env, asyncContext->value, strlen(asyncContext->value), &result[1]); - } else { - napi_value message = nullptr; - napi_create_string_utf8(env, "key does not exist", NAPI_AUTO_LENGTH, &message); - napi_create_error(env, nullptr, message, &result[0]); - napi_get_undefined(env, &result[1]); - } - if (asyncContext->deferred) { - // If a promise is used, check the result of callback 1. - if (!asyncContext->status) { - // Triggered when callback 1 is successful (status is 1), that is, to invoke the callback passed in then in the promise. 
- napi_resolve_deferred(env, asyncContext->deferred, result[1]); - } else { - // Triggered when callback 1 fails (status is 0), that is, to invoke the callback passed in catch in the promise. - napi_reject_deferred(env, asyncContext->deferred, result[0]); - } - } else { - // If a callback is used, use napi_call_function to invoke the callback to return the result. - napi_value callback = nullptr; - napi_value returnVal; - napi_get_reference_value(env, asyncContext->callbackRef, &callback); - napi_call_function(env, nullptr, callback, 2, result, &returnVal); - napi_delete_reference(env, asyncContext->callbackRef); - } - napi_delete_async_work(env, asyncContext->work); - delete asyncContext; - }, - (void*)asyncContext, &asyncContext->work); - napi_queue_async_work(env, asyncContext->work); - + // Invoke the ArkTS function in the N-API method, save the return value in result, and return result to ArkTS. + napi_call_function(env, nullptr, args[0], 1, &argv, &result); + return result; } -``` -**JS Sample Code** - -```js -import storage from 'libstorage.so'; - -export default { - testGetSync() { - const name = storage.getSync('name'); - console.log('name is ' + name); - }, - testGet() { - storage.get('name') - .then(date => { - console.log('name is ' + data); - }) - .catch(error => { - console.log('error: ' + error); - }); - } -} -``` - - - -## Binding Native and JS Objects for the NetServer Module - -### Overview - -This example shows how to implement the **on**, **off**, and **once** methods and bind C++ and JS objects using **wrap()**. The **NetServer** module implements the network service. 
- -### API Declaration - -```typescript -export class NetServer { - function start(port: number): void; - function stop(): void; - function on('start' | 'stop', callback: Function): void; - function once('start' | 'stop', callback: Function): void; - function off('start' | 'stop', callback: Function): void; -} -``` - -### Implementation - -You can obtain the complete code from **sample/native_module_netserver/** in the [OpenHarmony/arkui_napi](https://gitee.com/openharmony/arkui_napi/tree/master) repository. - -**1. Register the module.** - -```c -static napi_value NetServer::Export(napi_env env, napi_value exports) +EXTERN_C_START +// Init() hooks native methods, such as Add and NativeCallArkTS, in exports. exports is the ArkTS object obtained after you import the native capabilities. +static napi_value Init(napi_env env, napi_value exports) { - const char className[] = "NetServer"; - napi_property_descriptor properties[] = { - DECLARE_NAPI_FUNCTION("start", JS_Start), - DECLARE_NAPI_FUNCTION("stop", JS_Stop), - DECLARE_NAPI_FUNCTION("on", JS_On), - DECLARE_NAPI_FUNCTION("once", JS_Once), - DECLARE_NAPI_FUNCTION("off", JS_Off), + // Function description struct. The third parameter "Add" is the N-API method. + // The first parameter "add" is the name of the ArkTS method. + napi_property_descriptor desc[] = { + { "add", nullptr, Add, nullptr, nullptr, nullptr, napi_default, nullptr }, + { "nativeCallArkTS", nullptr, NativeCallArkTS, nullptr, nullptr, nullptr, napi_default, nullptr }, }; - napi_value netServerClass = nullptr; - - napi_define_class(env, className, sizeof(className), JS_Constructor, nullptr, countof(properties), properties, - &netServerClass); - - napi_set_named_property(env, exports, "NetServer", netServerClass); - + // Hook the N-API method to the ArkTS object exports. + napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc); return exports; } -``` - -**2. 
Bind C++ and JS objects in a constructor.** +EXTERN_C_END -```c -napi_value NetServer::JS_Constructor(napi_env env, napi_callback_info cbinfo) -{ - napi_value thisVar = nullptr; - void* data = nullptr; - napi_get_cb_info(env, cbinfo, nullptr, nullptr, &thisVar, &data); - - // C++ native object to be mapped to the JS object. - NetServer* netServer = new NetServer(env, thisVar); - - // Use napi_wrap to bind netServer and thisVar (JS object created). - napi_wrap( - env, thisVar, netServer, - // The JS object is automatically released by the engine. When the JS object is released, the callback is triggered to release netServer. - [](napi_env env, void* data, void* hint) { - printf("NetServer::Destructor\n"); - NetServer* netServer = (NetServer*)data; - delete netServer; - }, - nullptr, nullptr); - - return thisVar; -} -``` - -**3. Obtain a C++ object from a JS object.** +// Information about the module that loads the data. Record information such as the Init() function and module name. +static napi_module demoModule = { + .nm_version =1, + .nm_flags = 0, + .nm_filename = nullptr, + .nm_register_func = Init, + .nm_modname = "entry", + .nm_priv = ((void*)0), + .reserved = { 0 }, +}; -```c -napi_value NetServer::JS_Start(napi_env env, napi_callback_info cbinfo) +// When the .so file is opened, the function is automatically called to register the demoModule module. +extern "C" __attribute__((constructor)) void RegisterHelloModule(void) { - size_t argc = 1; - napi_value argv[1] = {0}; - napi_value thisVar = nullptr; - void* data = nullptr; - napi_get_cb_info(env, cbinfo, &argc, argv, &thisVar, &data); - - NetServer* netServer = nullptr; - // Obtain the C++ object from thisVar using napi_unwrap. 
- napi_unwrap(env, thisVar, (void**)&netServer); - - NAPI_ASSERT(env, argc >= 1, "requires 1 parameter"); - - napi_valuetype valueType; - napi_typeof(env, argv[0], &valueType); - NAPI_ASSERT(env, valueType == napi_number, "type mismatch for parameter 1"); - - int32_t port = 0; - napi_get_value_int32(env, argv[0], &port); - - // Start the netServer service. - netServer->Start(port); - - napi_value result = nullptr; - napi_get_undefined(env, &result); - return result; + napi_module_register(&demoModule); } ``` -After **netServer->Start** is executed, call back the **start** event registered by **on()**. - -```c -int NetServer::Start(int port) -{ - printf("NetServer::Start thread_id: %ld \n", uv_thread_self()); - - struct sockaddr_in addr; - int r; - - uv_ip4_addr("0.0.0.0", port, &addr); - - r = uv_tcp_init(loop_, &tcpServer_); - if (r) { - fprintf(stderr, "Socket creation error\n"); - return 1; - } - - r = uv_tcp_bind(&tcpServer_, (const struct sockaddr*)&addr, 0); - if (r) { - fprintf(stderr, "Bind error\n"); - return 1; - } - - r = uv_listen((uv_stream_t*)&tcpServer_, SOMAXCONN, OnConnection); - if (r) { - fprintf(stderr, "Listen error %s\n", uv_err_name(r)); - return 1; +```js +// entry\src\main\ets\pages\index.ets + +import hilog from '@ohos.hilog'; +// Import the native capabilities. +import entry from 'libentry.so' + +@Entry +@Component +struct Index { + + build() { + Row() { + Column() { + // The first button calls the add() method, which uses the N-API Add method to add the two numbers. + Button('ArkTS call C++') + .fontSize(50) + .fontWeight(FontWeight.Bold) + .onClick(() => { + hilog.isLoggable(0x0000, 'testTag', hilog.LogLevel.INFO); + hilog.info(0x0000, 'testTag', 'Test NAPI 2 + 3 = %{public}d', entry.add(2, 3)); + }) + // The second button calls the nativeCallArkTS() method, which uses the N-API NativeCallArkTS method to execute the ArkTS function. 
+ Button('C++ call ArkTS') + .fontSize(50) + .fontWeight(FontWeight.Bold) + .onClick(() => { + hilog.isLoggable(0x0000, 'testTag', hilog.LogLevel.INFO); + let ret = entry.nativeCallArkTS((value)=>{return value * 2;}); + hilog.info(0x0000, 'testTag', 'Test NAPI nativeCallArkTS ret = %{public}d', ret); + }) + } + .width('100%') } - - // Trigger the start event after the service is started. - Emit("start", nullptr); - - return 0; + .height('100%') + } } -``` - -**4. Call on() to register an event observer.** - -```c -napi_value NetServer::JS_On(napi_env env, napi_callback_info cbinfo) -{ - size_t argc = 2; - napi_value argv[2] = {0}; - napi_value thisVar = 0; - void* data = nullptr; - napi_get_cb_info(env, cbinfo, &argc, argv, &thisVar, &data); - - NetServer* netServer = nullptr; - // Obtain the NetServer pointer using napi_unwrap. - napi_unwrap(env, thisVar, (void**)&netServer); - - NAPI_ASSERT(env, argc >= 2, "requires 2 parameter"); - // Verify the parameter type. - napi_valuetype eventValueType; - napi_typeof(env, argv[0], &eventValueType); - NAPI_ASSERT(env, eventValueType == napi_string, "type mismatch for parameter 1"); - - napi_valuetype eventHandleType; - napi_typeof(env, argv[1], &eventHandleType); - NAPI_ASSERT(env, eventHandleType == napi_function, "type mismatch for parameter 2"); - - char type[64] = {0}; - size_t typeLen = 0; - - napi_get_value_string_utf8(env, argv[0], type, 63, &typeLen); - - // Register the event handler. - netServer->On((const char*)type, argv[1]); - - napi_value result = nullptr; - napi_get_undefined(env, &result); - return result; -} ``` -**JS Sample Code** - -```javascript -import { NetServer } from 'libnetserver.so'; - -export default { - testNetServer() { - var netServer = new NetServer(); - netServer.on('start', (event) => {}); - netServer.start(1000); // The port number is 1000. After start is executed, invoke the start callback registered. 
- } -} +```js +// entry\src\main\cpp\types\libentry\index.d.ts +// Declare the N-APIs exposed to ArkTS. +export const add: (a: number, b: number) => number; +export const nativeCallArkTS: (a: object) => number; ``` +## Development Guidelines +### Registration -## Calling Back a JS API in a Non-JS Thread - -### Overview - -This example describes how to invoke a JS callback in a non-JS thread. For example, a sensor listener is registered for a JS application. The sensor data is reported by an SA. When the SA invokes the client through Inter-Process Communication (IPC), the execution thread is an IPC thread, which is different from the JS thread of the SA. In this case, the JS callback must be thrown to the JS thread to execute. Otherwise, the application will crash. - -### Implementation - -You can obtain the complete code from **sample/native_module_callback/** in the [OpenHarmony/arkui_napi](https://gitee.com/openharmony/arkui_napi/tree/master) repository. - -**1. Register the module.** - -Register the **test** API to pass in a parameter. - -```c++ -/*********************************************** - * Module export and register - ***********************************************/ -static napi_value CallbackExport(napi_env env, napi_value exports) -{ - static napi_property_descriptor desc[] = { - DECLARE_NAPI_FUNCTION("test", JSTest) - }; - NAPI_CALL(env, napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc)); - return exports; -} - -// Define the callback. -static napi_module callbackModule = { - .nm_version = 1, - .nm_flags = 0, - .nm_filename = nullptr, - .nm_register_func = CallbackExport, - .nm_modname = "callback", - .nm_priv = ((void*)0), - .reserved = { 0 }, -}; - -// Register the callback. -extern "C" __attribute__((constructor)) void CallbackTestRegister() -{ - napi_module_register(&callbackModule); -} -``` - -**2. 
Obtain the loop in env and throw the task to a JS thread.** - -```c++ -#include +* To prevent conflicts with symbols in the .so file, add "static" to the function (such as the Init function) corresponding to **nm_register_func**. +* The entry of module registration, that is, the function name modified by **\_\_attribute\_\_((constructor))** (for example, the **RegisterHelloModule** function), must be unique. -#include "napi/native_api.h" -#include "napi/native_node_api.h" +### .so Naming Rules -#include "uv.h" +The .so file names must comply with the following rules: -struct CallbackContext { - napi_env env = nullptr; - napi_ref callbackRef = nullptr; - int retData = 0; -}; +* Each module has a .so file. +* The **nm_modname** field in **napi_module** must be the same as the module name. For example, if the module name is **hello**, name the .so file **libhello.so**. The sample code for importing the .so file is **import hello from 'libhello.so'**. -void callbackTest(CallbackContext* context) -{ - uv_loop_s* loop = nullptr; - // Save the env when the JS callback is registered. Obtain the loop of the JS thread from env. - napi_get_uv_event_loop(context->env, &loop); - - // Create uv_work_t to transfer private data (int type 1 in this example). Note that memory must be released after the callback is complete. The logic for generating the returned data is omitted here. - uv_work_t* work = new uv_work_t; - context->retData = 1; - work->data = (void*)context; - - // Call the libuv API to throw the JS task to the loop for execution. - uv_queue_work( - loop, - work, - // This callback is executed in another common thread to process tasks asynchronously. After the callback is complete, execute the next callback. In this scenario, this callback does not need to execute any task. - [](uv_work_t* work) {}, - // This callback is executed in the JS thread bound to env. 
- [](uv_work_t* work, int status) { - CallbackContext* context = (CallbackContext*)work->data; - napi_handle_scope scope = nullptr; - // Open the handle scope to manage the lifecycle of napi_value. Otherwise, memory leakage occurs. - napi_open_handle_scope(context->env, &scope); - if (scope == nullptr) { - return; - } - - // Call the NAPIs. - napi_value callback = nullptr; - napi_get_reference_value(context->env, context->callbackRef, &callback); - napi_value retArg; - napi_create_int32(context->env, context->retData, &retArg); - napi_value ret; - napi_call_function(context->env, nullptr, callback, 1, &retArg, &ret); - napi_delete_reference(context->env, context->callbackRef); - - // Close the handle scope to release napi_value. - napi_close_handle_scope(context->env, scope); - - // Release the work pointer. - if (work != nullptr) { - delete work; - } - - delete context; - } - ); -} +### Constraints on JS Object Threads -static napi_value JSTest(napi_env env, napi_callback_info info) -{ - size_t argc = 1; - napi_value argv[1] = { 0 }; - napi_value thisVar = nullptr; - void* data = nullptr; - napi_get_cb_info(env, info, &argc, argv, &thisVar, &data); - - // Obtain the first input parameter, that is, the callback to be invoked subsequently. - napi_valuetype valueType = napi_undefined; - napi_typeof(env, argv[0], &valueType); - if (valueType != napi_function) { - return nullptr; - } - // Save the env and callback for subsequent transfer. - auto asyncContext = new CallbackContext(); - asyncContext->env = env; - napi_create_reference(env, argv[0], 1, &asyncContext->callbackRef); - // Simulate the logic for throwing a task to a non-JS thread. - std::thread testThread(callbackTest, asyncContext); - testThread.detach(); - - return nullptr; -} -``` +The ArkCompiler protects JS object threads. Improper use may cause an application crash. Observe the following rules: -**JS Sample Code** +* The N-APIs can be used only by JS threads. 
+* **env** is bound to a thread and cannot be used across threads. The JS object created by an N-API can be used only in the thread, in which the object is created, that is, the JS object is bound to the **env** of the thread. -```js -import callback from 'libcallback.so'; +### Header File Import -export default { - testcallback() { - callback.test((data) => { - console.error('test result = ' + data) - }) - } -} -``` +Import **napi/native_api.h**. Otherwise, an error indicating that the N-API cannot be found will be reported. diff --git a/en/application-dev/napi/neural-network-runtime-guidelines.md b/en/application-dev/napi/neural-network-runtime-guidelines.md index 9ae694fc12449634a75fae260050188b68e97804..0c86fd05801197bdd63a885c2071f258687a768e 100644 --- a/en/application-dev/napi/neural-network-runtime-guidelines.md +++ b/en/application-dev/napi/neural-network-runtime-guidelines.md @@ -244,7 +244,7 @@ The development process of the Neural Network Runtime consists of three phases: // Create a compilation instance to pass the model to the underlying hardware for compilation. OH_NNCompilation* compilation = OH_NNCompilation_Construct(model); if (compilation == nullptr) { - std::cout << "CreateCompilation failed, error happended when creating compilation." << std::endl; + std::cout << "CreateCompilation failed, error happened when creating compilation." << std::endl; return OH_NN_MEMORY_ERROR; } diff --git a/en/application-dev/napi/rawfile-guidelines.md b/en/application-dev/napi/rawfile-guidelines.md index c585b162a2dc483ca72a5369170b2ee0f43a01a1..ccd517b40d77362b94b76001cf921f134a0cf237 100644 --- a/en/application-dev/napi/rawfile-guidelines.md +++ b/en/application-dev/napi/rawfile-guidelines.md @@ -1,14 +1,12 @@ # Raw File Development - - ## When to Use -This document describes how to use the native Rawfile APIs to manage raw file directories and files in OpenHarmony. You can use the APIs to traverse, open, search for, read, and close raw files. 
+This document describes how to use the native Rawfile APIs to manage raw file directories and files in OpenHarmony. You can use Rawfile APIs to perform operations such as traversing the file list, opening, searching for, reading, and closing raw files. ## Available APIs -| API | Description | +| Name | Description | | :----------------------------------------------------------- | :--------------------------------------- | | NativeResourceManager *OH_ResourceManager_InitNativeResourceManager(napi_env env, napi_value jsResMgr) | Initializes the native resource manager. | | RawDir *OH_ResourceManager_OpenRawDir(const NativeResourceManager *mgr, const char *dirName) | Opens a raw file directory. | @@ -27,60 +25,289 @@ This document describes how to use the native Rawfile APIs to manage raw file di ## How to Develop -1. Add the header file. + The following describes how to obtain the raw file list, raw file content, and raw file descriptor on the JavaScript side as an example. + +1. Create a project. + +![Creating a C++ application](figures/rawfile1.png) + +2. Add dependencies. + +After a project is created, the **cpp** directory is created under the project. The directory contains files such as **libentry/index.d.ts**, **hello.cpp**, and **CMakeLists.txt**. + +1. Open the **src/main/cpp/CMakeLists.txt** file, and add **librawfile.z.so** and **libhilog_ndk.z.so** to **target_link_libraries**. ```c++ - #include "raw_file_manager.h" + target_link_libraries(entry PUBLIC libace_napi.z.so libhilog_ndk.z.so librawfile.z.so) ``` - - -2. Call **OH_ResourceManager_InitNativeResourceManager(napi_env env, napi_value jsResMgr)** to obtain a **NativeResourceManager** instance. +2. Open the **src/main/cpp/types/libentry/index.d.ts** file, and declare the application functions **getFileList**, **getRawFileContent**, and **getRawFileDescriptor**. - ```js - // Import the JS resource manager from the JS head file and pass it to the C++ file. 
- import resManager from '@ohos.resourceManager' - import rawfileTest from 'librawFileTest.so' - resManager.getResourceManager().then(resmgr => { - rawfileTest.testRawFile("test", resmgr, (error, value) => { - console.log("test rawFile"); - }) - }); - ``` - - ```c++ - // Obtain and parse the parameters in the C++ file. - NativeResourceManager* nativeResourceManager = nullptr; - std::string path; - if (i == 0 && valueType == napi_string) { - // Parse the first parameter, which is the file or directory path relative to the raw file directory. - ...... - path = buf.data(); - } else if (i == 1 && valueType == napi_object) { - // Parse the second parameter, which is the JS resource manager. - nativeResourceManager = OH_ResourceManager_InitNativeResourceManager(env, argv[i]); + ```c++ + import resourceManager from '@ohos.resourceManager'; + export const getFileList: (resmgr: resourceManager.ResourceManager, path: string) => Array; + export const getRawFileContent: (resmgr: resourceManager.ResourceManager, path: string) => Uint8Array; + export const getRawFileDescriptor: (resmgr: resourceManager.ResourceManager, path: string) => resourceManager.RawFileDescriptor; + ``` + +3. Modify the source file. + +1. Open the **src/main/cpp/hello.cpp** file. During initialization, the file maps the external JavaScript APIs **getFileList**, **getRawFileContent**, and **getRawFileDescriptor** to C++ native APIs **GetFileList**, **GetRawFileContent**, and **GetRawFileDescriptor**. 
+ + ```c++ + EXTERN_C_START + static napi_value Init(napi_env env, napi_value exports) + { + napi_property_descriptor desc[] = { + { "getFileList", nullptr, GetFileList, nullptr, nullptr, nullptr, napi_default, nullptr }, + { "getRawFileContent", nullptr, GetRawFileContent, nullptr, nullptr, nullptr, napi_default, nullptr }, + { "getRawFileDescriptor", nullptr, GetRawFileDescriptor, nullptr, nullptr, nullptr, napi_default, nullptr } + }; + + napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc); + return exports; } + EXTERN_C_END ``` +2. Add the three functions to the **src/main/cpp/hello.cpp** file. + + ```c++ + static napi_value GetFileList(napi_env env, napi_callback_info info) + static napi_value GetRawFileContent(napi_env env, napi_callback_info info) + static napi_value GetRawFileDescriptor(napi_env env, napi_callback_info info) + ``` + +3. Obtain JavaScript resource objects from the **hello.cpp** file, and convert them to native resource objects. Then, call the native APIs to obtain the raw file list, raw file content, and raw file descriptor {fd, offset, length}. The sample code is as follows: + + ```c++ + // Example 1: Use GetFileList to obtain the raw file list. + static napi_value GetFileList(napi_env env, napi_callback_info info) + { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "NDKTest Begin"); + size_t requireArgc = 3; + size_t argc = 2; + napi_value argv[2] = { nullptr }; + // Obtain arguments of the native API. + napi_get_cb_info(env, info, &argc, argv, nullptr, nullptr); + + // Obtain argv[0], which specifies conversion of the JavaScript resource object (that is, OH_ResourceManager_InitNativeResourceManager) to a native object. + NativeResourceManager *mNativeResMgr = OH_ResourceManager_InitNativeResourceManager(env, argv[0]); + + // Obtain argv[1], which specifies the relative path of the raw file. 
+ size_t strSize; + char strBuf[256]; + napi_get_value_string_utf8(env, argv[1], strBuf, sizeof(strBuf), &strSize); + std::string dirName(strBuf, strSize); + + // Obtain the corresponding rawDir pointer object. + RawDir* rawDir = OH_ResourceManager_OpenRawDir(mNativeResMgr, dirName.c_str()); + + // Obtain the number of files and folders in rawDir. + int count = OH_ResourceManager_GetRawFileCount(rawDir); + + // Traverse rawDir to obtain the list of file names and save it. + std::vector tempArray; + for(int i = 0; i < count; i++) { + std::string filename = OH_ResourceManager_GetRawFileName(rawDir, i); + tempArray.emplace_back(filename); + } + + napi_value fileList; + napi_create_array(env, &fileList); + for (size_t i = 0; i < tempArray.size(); i++) { + napi_value jsString; + napi_create_string_utf8(env, tempArray[i].c_str(), NAPI_AUTO_LENGTH, &jsString); + napi_set_element(env, fileList, i, jsString); + } + + // Close the rawDir pointer object. + OH_ResourceManager_CloseRawDir(rawDir); + OH_ResourceManager_ReleaseNativeResourceManager(mNativeResMgr); + return fileList; + } + + // Example 2: Use rawDir pointer object to obtain the content of the raw file. 
+ napi_value CreateJsArrayValue(napi_env env, std::unique_ptr &data, long length) + { + napi_value buffer; + napi_status status = napi_create_external_arraybuffer(env, data.get(), length, + [](napi_env env, void *data, void *hint) { + delete[] static_cast(data); + }, nullptr, &buffer); + if (status != napi_ok) { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "Failed to create external array buffer"); + return nullptr; + } + napi_value result = nullptr; + status = napi_create_typedarray(env, napi_uint8_array, length, buffer, 0, &result); + if (status != napi_ok) { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "Failed to create media typed array"); + return nullptr; + } + data.release(); + return result; + } + static napi_value GetRawFileContent(napi_env env, napi_callback_info info) + { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "GetFileContent Begin"); + size_t requireArgc = 3; + size_t argc = 2; + napi_value argv[2] = { nullptr }; + // Obtain arguments of the native API. + napi_get_cb_info(env, info, &argc, argv, nullptr, nullptr); + + // Obtain argv[0], which specifies conversion of the JavaScript resource object (that is, OH_ResourceManager_InitNativeResourceManager) to a native object. + NativeResourceManager *mNativeResMgr = OH_ResourceManager_InitNativeResourceManager(env, argv[0]); + size_t strSize; + char strBuf[256]; + napi_get_value_string_utf8(env, argv[1], strBuf, sizeof(strBuf), &strSize); + std::string filename(strBuf, strSize); + + // Obtain the raw file pointer object. + RawFile *rawFile = OH_ResourceManager_OpenRawFile(mNativeResMgr, filename.c_str()); + if (rawFile != nullptr) { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "OH_ResourceManager_OpenRawFile success"); + } + // Obtain the size of the raw file and apply for memory. + long len = OH_ResourceManager_GetRawFileSize(rawFile); + std::unique_ptr data= std::make_unique(len); + // Read the raw file. 
+ int res = OH_ResourceManager_ReadRawFile(rawFile, data.get(), len); + // Close the raw file pointer object. + OH_ResourceManager_CloseRawFile(rawFile); + OH_ResourceManager_ReleaseNativeResourceManager(mNativeResMgr); + // Convert the native object to a JavaScript object. + return CreateJsArrayValue(env, data, len); + } + + // Example 3: Use GetRawFileDescriptor to obtain the FD of the raw file. + napi_value createJsFileDescriptor(napi_env env, RawFileDescriptor &descriptor) + { + napi_value result; + napi_status status = napi_create_object(env, &result); + if (status != napi_ok) { + return result; + } + + napi_value fd; + status = napi_create_int32(env, descriptor.fd, &fd); + if (status != napi_ok) { + return result; + } + status = napi_set_named_property(env, result, "fd", fd); + if (status != napi_ok) { + return result; + } + + napi_value offset; + status = napi_create_int64(env, descriptor.start, &offset); + if (status != napi_ok) { + return result; + } + status = napi_set_named_property(env, result, "offset", offset); + if (status != napi_ok) { + return result; + } + + napi_value length; + status = napi_create_int64(env, descriptor.length, &length); + if (status != napi_ok) { + return result; + } + status = napi_set_named_property(env, result, "length", length); + if (status != napi_ok) { + return result; + } + return result; + } + static napi_value GetRawFileDescriptor(napi_env env, napi_callback_info info) + { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "NDKTest GetRawFileDescriptor Begin"); + size_t requireArgc = 3; + size_t argc = 2; + napi_value argv[2] = { nullptr }; + // Obtain arguments of the native API. + napi_get_cb_info(env, info, &argc, argv, nullptr, nullptr); + + napi_valuetype valueType; + napi_typeof(env, argv[0], &valueType); + // Obtain the native resourceManager object. 
+ NativeResourceManager *mNativeResMgr = OH_ResourceManager_InitNativeResourceManager(env, argv[0]); + size_t strSize; + char strBuf[256]; + napi_get_value_string_utf8(env, argv[1], strBuf, sizeof(strBuf), &strSize); + std::string filename(strBuf, strSize); + // Obtain the raw file pointer object. + RawFile *rawFile = OH_ResourceManager_OpenRawFile(mNativeResMgr, filename.c_str()); + if (rawFile != nullptr) { + OH_LOG_Print(LOG_APP, LOG_ERROR, GLOBAL_RESMGR, tag, "OH_ResourceManager_OpenRawFile success"); + } + // Obtain the FD of the raw file, that is, RawFileDescriptor {fd, offset, length}. + RawFileDescriptor descriptor; + OH_ResourceManager_GetRawFileDescriptor(rawFile, descriptor); + // Close the raw file pointer object. + OH_ResourceManager_CloseRawFile(rawFile); + OH_ResourceManager_ReleaseNativeResourceManager(mNativeResMgr); + // Convert the native object to a JavaScript object. + return createJsFileDescriptor(env,descriptor); + } + ``` + +4. Call APIs on the JavaScript side. + +1. Open **src\main\ets\pages\index.ets**, and import **libentry.so**. + +2. Obtain the JavaScript resource object, that is, **resourceManager**. +3. Call **getFileList**, that is, the native API declared in **src/main/cpp/types/libentry/index.d.ts**. When calling the API, pass the JavaScript resource object and the relative path of the raw file. The sample code is as follows: + + ```js + import hilog from '@ohos.hilog'; + import testNapi from 'libentry.so' // Import the libentry.so file. + @Entry + @Component + struct Index { + @State message: string = 'Hello World' + private resmgr = getContext().resourceManager; // Obtain the JavaScript resource object. + build() { + Row() { + Column() { + Text(this.message) + .fontSize(50) + .fontWeight(FontWeight.Bold) + .onClick(() => { + hilog.isLoggable(0x0000, 'testTag', hilog.LogLevel.INFO); + let rawfilelist = testNapi.getFileList(this.resmgr, ""); // Pass the JavaScript resource object and the relative path of the raw file. 
+ console.log("rawfilelist" + rawfilelist); + let rawfileContet = testNapi.getRawFileContent(this.resmgr, "rawfile1.txt"); + console.log("rawfileContet" + rawfileContet); + let rawfileDescriptor = testNapi.getRawFileDescriptor(this.resmgr, "rawfile1.txt"); + console.log("getRawFileDescriptor" + rawfileDescriptor.fd, rawfileDescriptor.offset, rawfileDescriptor.length); + }) + } + .width('100%') + } + .height('100%') + } + } + ``` + +## Using C++ Functions -3. Call **OH_ResourceManager_OpenRawDir** to obtain a **RawDir** instance based on the **NativeResourceManager** instance. +1. Call **OH_ResourceManager_OpenRawDir** to obtain a **RawDir** instance based on the **NativeResourceManager** instance. ```c++ RawDir* rawDir = OH_ResourceManager_OpenRawDir(nativeResourceManager, path.c_str()); ``` - - -4. Call **OH_ResourceManager_GetRawFileCount** to obtain the total number of raw files in the directory based on the **RawDir** instance. +2. Call **OH_ResourceManager_GetRawFileCount** to obtain the total number of raw files in the directory based on the **RawDir** instance. ```c++ int count = OH_ResourceManager_GetRawFileCount(rawDir); ``` - - -5. Call **OH_ResourceManager_GetRawFileName** to obtain the name of the raw file with the specified index. +3. Call **OH_ResourceManager_GetRawFileName** to obtain the name of the raw file with the specified index. ```c++ for (int index = 0; index < count; index++) { @@ -88,85 +315,65 @@ This document describes how to use the native Rawfile APIs to manage raw file di } ``` - - -6. Call **OH_ResourceManager_OpenRawFile** to obtain a **RawFile** instance with the specified file name. +4. Call **OH_ResourceManager_OpenRawFile** to obtain a **RawFile** instance with the specified file name. ```c++ RawFile* rawFile = OH_ResourceManager_OpenRawFile(nativeResourceManager, fileName.c_str()); ``` - - -7. Call **OH_ResourceManager_GetRawFileSize** to obtain the size of the raw file. +5. 
Call **OH_ResourceManager_GetRawFileSize** to obtain the size of the raw file. ```c++ long rawFileSize = OH_ResourceManager_GetRawFileSize(rawFile); ``` - - -8. Call **OH_ResourceManager_SeekRawFile** to seek a read/write position in the raw file based on the specified offset. +6. Call **OH_ResourceManager_SeekRawFile** to seek a read/write position in the raw file based on the specified offset. ```c++ int position = OH_ResourceManager_SeekRawFile(rawFile, 10, 0); int position = OH_ResourceManager_SeekRawFile(rawFile, 0 , 1); int position = OH_ResourceManager_SeekRawFile(rawFile, -10, 2); ``` - - -9. Call **OH_ResourceManager_GetRawFileOffset** to obtain the raw file offset. +7. Call **OH_ResourceManager_GetRawFileOffset** to obtain the raw file offset. ```c++ long rawFileOffset = OH_ResourceManager_GetRawFileOffset(rawFile) ``` - - -10. Call **OH_ResourceManager_ReadRawFile** to read the raw file. +8. Call **OH_ResourceManager_ReadRawFile** to read the raw file. ```c++ std::unique_ptr mediaData = std::make_unique(rawFileSize); long rawFileOffset = OH_ResourceManager_ReadRawFile(rawFile, mediaData.get(), rawFileSize); ``` - - -11. Call **OH_ResourceManager_CloseRawFile** to close the file to release resources. +9. Call **OH_ResourceManager_CloseRawFile** to close the file to release resources. ```c++ OH_ResourceManager_CloseRawFile(rawFile); ``` - - -12. Call **OH_ResourceManager_CloseRawDir** to close the raw file directory. +10. Call **OH_ResourceManager_CloseRawDir** to close the raw file directory. ```c++ OH_ResourceManager_CloseRawDir(rawDir); ``` - - -13. Call **OH_ResourceManager_GetRawFileDescriptor** to obtain the FD of the raw file. +11. Call **OH_ResourceManager_GetRawFileDescriptor** to obtain the FD of the raw file. ```c++ RawFileDescriptor descriptor; bool result = OH_ResourceManager_GetRawFileDescriptor(rawFile, descriptor); ``` - - -14. Call **OH_ResourceManager_ReleaseRawFileDescriptor** to release the FD of the raw file. +12. 
Call **OH_ResourceManager_ReleaseRawFileDescriptor** to release the FD of the raw file. ```c++ OH_ResourceManager_ReleaseRawFileDescriptor(descriptor); ``` - - -15. Call **OH_ResourceManager_ReleaseNativeResourceManager** to release the native resource manager. +13. Call **OH_ResourceManager_ReleaseNativeResourceManager** to release the native resource manager. ```c++ OH_ResourceManager_ReleaseNativeResourceManager(nativeResourceManager); diff --git a/en/application-dev/notification/Readme-EN.md b/en/application-dev/notification/Readme-EN.md index 55070f9e38666be2c6cd5cf87b3d3680df0293ba..f7b76df0e99484508bcb073462fd65f0ab3d03cb 100644 --- a/en/application-dev/notification/Readme-EN.md +++ b/en/application-dev/notification/Readme-EN.md @@ -1,8 +1,9 @@ # Notification - [Notification Overview](notification-overview.md) -- [Notification Subscription (for System Applications)](notification-subscription.md) +- [Notification Subscription (for System Applications Only)](notification-subscription.md) - [Enabling Notification](notification-enable.md) +- [Notification Badge](notification-badge.md) - Publishing a Notification - [Publishing a Basic Notification](text-notification.md) - [Publishing a Progress Notification](progress-bar-notification.md) diff --git a/en/application-dev/notification/notification-badge.md b/en/application-dev/notification/notification-badge.md new file mode 100644 index 0000000000000000000000000000000000000000..66d29e659d03ac147a9aa7acc0e1af24b60980c3 --- /dev/null +++ b/en/application-dev/notification/notification-badge.md @@ -0,0 +1,69 @@ +# Notification Badge + +OpenHarmony provides APIs for setting the notification badge, which is displayed in the upper right corner of the application icon on the home screen to notify the user of the count of unread notifications. + +When a new notification arrives, the count on the badge is incremented by 1. + +After a notification is read, the count on the badge is decremented by 1. 
If there is no unread notification, the badge is not displayed. + + +## Available APIs + +1. The notification service provides two methods to increase the count on the notification badge: + + - When publishing a notification, pass the **badgeNumber** parameter in [NotificationRequest](../reference/apis/js-apis-notificationManager.md#notificationrequest). After the notification is received, the count on the badge is incremented. + + - Call the [setBadgeNumber](../reference/apis/js-apis-notificationManager.md#setbadgenumber) API to set the count on the badge. + +2. To decrease the count on the badge, call the **setBadgeNumber** API. + +| API| Description| +| -------- | -------- | +| setBadgeNumber(badgeNumber: number, callback: AsyncCallback\): void | Sets the count on the badge.| + + +## How to Develop + +1. Import the **NotificationManager** module. + + ```ts + import notificationManager from '@ohos.notificationManager'; + ``` + +2. Increase the count on the badge. + + When publishing a notification, pass the **badgeNumber** parameter in [NotificationRequest](../reference/apis/js-apis-notificationManager.md#notificationrequest). For details, see [Publishing a Basic Notification](text-notification.md). + + In this example, the **setBadgeNumber** API is called to add a badge. This API is called after a new notification is published. + + ```ts + function setBadgeNumberCallback(err) { + if (err) { + console.info(`Set badge failed code is ${err.code}, message is ${err.message}`); + } else { + console.info(`Set badge success`); + } + } + + let badgeNumber = 10 + notificationManager.setBadgeNumber(badgeNumber, setBadgeNumberCallback); + ``` + +3. Reduce the count on the badge. + + After a notification is read, the application needs to call the API to set the number of remaining unread notifications. The badge is then updated. 
+ + ```ts + function setBadgeNumberCallback(err) { + if (err) { + console.info(`Set badge failed code is ${err.code}, message is ${err.message}`); + } else { + console.info(`Set badge success`); + } + } + + let badgeNumber = 9 + notificationManager.setBadgeNumber(badgeNumber, setBadgeNumberCallback); + ``` + + diff --git a/en/application-dev/notification/progress-bar-notification.md b/en/application-dev/notification/progress-bar-notification.md index 8090e7e835dae7f0658127fafbf04680a4e81114..db7cae812218c2f7b6c363d204baa04dfeeb639f 100644 --- a/en/application-dev/notification/progress-bar-notification.md +++ b/en/application-dev/notification/progress-bar-notification.md @@ -3,7 +3,7 @@ The progress notification is a commonly used notification type, mainly used to display the progress of an ongoing operation, such as file downloading. When publishing a progress notification through the notification subsystem, you can use the readily available template by specifying the related attributes, such as the template name and template data. -In the [NotificationTemplate](../reference/apis/js-apis-notificationManager.md#notificationtemplate), which can only be of the progress type, **data** indicates custom template data. +In the [NotificationTemplate](../reference/apis/js-apis-inner-notification-notificationTemplate.md), which can only be of the progress type, **data** indicates custom template data. 
## Available APIs diff --git a/en/application-dev/quick-start/Readme-EN.md b/en/application-dev/quick-start/Readme-EN.md index 91136d49f0db6aa18ecc86447894b922f9c263ff..631a5334c3fb783f84c4ca253f058fc386b6bc8d 100644 --- a/en/application-dev/quick-start/Readme-EN.md +++ b/en/application-dev/quick-start/Readme-EN.md @@ -25,6 +25,9 @@ - HSP - [In-Application HSP Development](in-app-hsp.md) - [Inter-Application HSP Development (for System Applications Only)](cross-app-hsp.md) + - Atomic Service + - [Atomic Service Development](atomicService.md) + - [Atomic Service Space Management (for System Applications Only)](atomicService-aging.md) - Quick Fix - [Quick Fix Overview](quickfix-principles.md) - [CLI-based Quick Fix Development](quickfix-debug.md) @@ -40,12 +43,37 @@ - [Resource Categories and Access](resource-categories-and-access.md) - Learning ArkTS - [Getting Started with ArkTS](arkts-get-started.md) - - ArkTS Syntax (Declarative UI) - - [Basic UI Description](arkts-basic-ui-description.md) - - State Management - - [Basic Concepts](arkts-state-mgmt-concepts.md) - - [State Management with Page-level Variables](arkts-state-mgmt-page-level.md) - - [State Management with Application-level Variables](arkts-state-mgmt-application-level.md) - - [Dynamic UI Element Building](arkts-dynamic-ui-elememt-building.md) - - [Rendering Control](arkts-rendering-control.md) - - [Restrictions and Extensions](arkts-restrictions-and-extensions.md) \ No newline at end of file + - Basic Syntax + - [Basic Syntax Overview](arkts-basic-syntax-overview.md) + - [Declarative UI Description](arkts-declarative-ui-description.md) + - Custom Component + - [Creating a Custom Component](arkts-create-custom-components.md) + - [Page and Custom Component Lifecycle](arkts-page-custom-components-lifecycle.md) + - [\@Builder: Custom Builder Function](arkts-builder.md) + - [\@BuilderParam: @Builder Function Reference](arkts-builderparam.md) + - [\@Styles: Definition of Resusable Styles](arkts-style.md) + - 
[\@Extend: Extension of Built-in Components](arkts-extend.md) + - [stateStyles: Polymorphic Style](arkts-statestyles.md) + - State Management + - [State Management Overview](arkts-state-management-overview.md) + - Component State Management + - [\@State: State Owned by Component](arkts-state.md) + - [\@Prop: One-Way Synchronization from Parent to Child Components](arkts-prop.md) + - [\@Link: Two-Way Synchronization Between Parent and Child Components](arkts-link.md) + - [\@Provide and \@Consume: Two-Way Synchronization with Descendant Components](arkts-provide-and-consume.md) + - [\@Observed and \@ObjectLink: Observing Attribute Changes in Nested Class Objects](arkts-observed-and-objectlink.md) + - Application State Management + - [Application State Management Overview](arkts-application-state-management-overview.md) + - [LocalStorage: UI State Storage](arkts-localstorage.md) + - [AppStorage: Application-wide UI State Storage](arkts-appstorage.md) + - [PersistentStorage: Application State Persistence](arkts-persiststorage.md) + - [Environment: Device Environment Query](arkts-environment.md) + - Other State Management Features + - [Overview of Other State Management Features](arkts-other-state-mgmt-functions-overview.md) + - [\@Watch: Getting Notified of State Variable Changes](arkts-watch.md) + - [$$ Syntax: Two-Way Synchronization of Built-in Components](arkts-two-way-sync.md) + - Rendering Control + - [Rendering Control Overview](arkts-rendering-control-overview.md) + - [if/else: Conditional Rendering](arkts-rendering-control-ifelse.md) + - [ForEach: Rendering of Repeated Content](arkts-rendering-control-foreach.md) + - [LazyForEach: Lazy Data Loading](arkts-rendering-control-lazyforeach.md) diff --git a/en/application-dev/quick-start/app-configuration-file.md b/en/application-dev/quick-start/app-configuration-file.md index f68ec8ee66f92910e0f11c4a8b705bfd7bbfd08d..cbc97f24e80d576f747d69eeeaec89f50c264283 100644 --- 
a/en/application-dev/quick-start/app-configuration-file.md +++ b/en/application-dev/quick-start/app-configuration-file.md @@ -19,7 +19,9 @@ This document gives an overview of the **app.json5** configuration file. To star "debug": false, "car": { "minAPIVersion": 8, - } + }, + "targetBundleName": "com.application.test", + "targetPriority": 50 }, } ``` @@ -28,11 +30,12 @@ This document gives an overview of the **app.json5** configuration file. To star As shown above, the **app.json5** file contains several tags. -**Table 1** Tags in the app.json5 file + **Table 1** Tags in the app.json5 file | Name| Description| Data Type| Initial Value Allowed| | -------- | -------- | -------- | -------- | | bundleName | Bundle name, which uniquely identifies an application. The value must comply with the following rules:
- Consists of letters, digits, underscores (_), and periods (.).
- Starts with a letter.
- Contains 7 to 127 bytes.
You are advised to use the reverse domain name notation, for example, *com.example.demo*, where the first part is the domain suffix **com**, the second part is the vendor/individual name, and the third part is the application name, which can be of multiple levels.
If an application is built with the system source code, you are advised to name it in *com.ohos.demo* notation, where **ohos** signifies that the application is an OpenHarmony system application.| String| No| +| bundleType| Bundle type, which is used to distinguish applications and atomic services.
- **app**: The bundle is a common application.
- **atomicService**: The bundle is an atomic service.
- **shared**: The bundle is a shared object application. | String| Yes (initial value: **"app"**)| | debug | Whether the application can be debugged. This tag is generated during compilation and building in DevEco Studio.
- **true**: The application can be debugged.
- **false**: The application cannot be debugged.| Boolean| Yes (initial value: **false**)| | icon | [Icon of the application](../application-models/application-component-configuration-stage.md). The value is an icon resource index.| String| No| | label | [Name of the application](../application-models/application-component-configuration-stage.md). The value is a string resource index.| String| No| @@ -51,3 +54,5 @@ As shown above, the **app.json5** file contains several tags. | wearable | Wearable-specific configuration, which includes **minAPIVersion** and **distributedNotificationEnabled** attributes.
When running on wearables, the application applies the attribute settings under this tag and ignores the general counterparts.| Object| Yes (initial value: general settings in the **app.json5** file)| | car | Head unit–specific configuration, which includes **minAPIVersion** and **distributedNotificationEnabled** attributes.
When running on head units, the application applies the attribute settings under this tag and ignores the general counterparts.| Object| Yes (initial value: general settings in the **app.json5** file)| | default | Default device–specific configuration, which includes **minAPIVersion** and **distributedNotificationEnabled** attributes.
When running on default devices, the application applies the attribute settings under this tag and ignores the general counterparts.| Object| Yes (initial value: general settings in the **app.json5** file)| +|targetBundleName|Target application name of the bundle. The value rule and range are the same as those of **bundleName**.|String|Yes (if the initial value is used, the target application is not an application with the overlay feature)| +|targetPriority|Priority of the application. When **targetBundleName** is set, the application is an application with the overlay feature. The value ranges from 1 to 100.|Number|Yes (initial value: **1**)| diff --git a/en/application-dev/quick-start/application-package-structure-fa.md b/en/application-dev/quick-start/application-package-structure-fa.md index 6909481445ecb1219c30ed3ae425d6b475662805..a9d647385fa9b5e9a47ebcf4ea77e546d4df3108 100644 --- a/en/application-dev/quick-start/application-package-structure-fa.md +++ b/en/application-dev/quick-start/application-package-structure-fa.md @@ -11,7 +11,7 @@ The difference between the application package structures in the FA model and st - The **assets** folder is a collection of all the resource files, library files, and code files in a HAP file. It can be further organized into the **entry** folder and the **js** folder. The **entry** folder stores the **resources** folder and the **resources.index** file. -- The **resources** folder stores resource files (such as strings and images) of the application. +- The **resources** folder stores resource files (such as strings and images) of the application. For details, see [Resource Categories and Access](resource-categories-and-access.md). - The **resources.index** file provides a resource index table, which is generated by DevEco Studio invoking the specific SDK tool. 
diff --git a/en/application-dev/quick-start/application-package-structure-stage.md b/en/application-dev/quick-start/application-package-structure-stage.md index cb6dc3b12ef12ff249d8afaa9871f901babd9412..0736157fd42b4b6b6a2549e9262a7d25313aa452 100644 --- a/en/application-dev/quick-start/application-package-structure-stage.md +++ b/en/application-dev/quick-start/application-package-structure-stage.md @@ -22,7 +22,7 @@ To develop an application based on the [stage model](application-configuration-f - The HAP file includes folders such as **ets**, **libs**, and **resources** and files such as **resources.index**, **module.json**, and **pack.info**. - The **ets** folder stores bytecode files generated after application code build. - The **libs** folder stores library files, which are .so binary files that contain third-party code on which the OpenHarmony application depends. - - The **resources** folder stores resource files (such as strings and images) of the application. + - The **resources** folder stores resource files (such as strings and images) of the application. For details, see [Resource Categories and Access](resource-categories-and-access.md). - The **resources.index** file provides a resource index table, which is generated when the application project is built in DevEco Studio. - The **module.json** file is the configuration file indispensable in a HAP file. It consists of **module.json5** and **app.json5** in the project configuration. While DevEco Studio provides default configuration, you must modify the configuration as needed. For details about the configuration fields, see [Application Configuration Files in Stage Model](application-configuration-file-overview-stage.md). - The **pack.info** file describes the HAP attributes in the bundle, for example, **bundleName** and **versionCode** in **app** and **name**, **type**, and **abilities** in **module**. The file is automatically generated when DevEco Studio generates the bundle. 
diff --git a/en/application-dev/quick-start/arkts-application-state-management-overview.md b/en/application-dev/quick-start/arkts-application-state-management-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..b481f79298e585c00a8a6e5c80f1c6a5cc092949 --- /dev/null +++ b/en/application-dev/quick-start/arkts-application-state-management-overview.md @@ -0,0 +1,13 @@ +# Application State Management Overview + + +The decorators described in the previous topics are used to share state variables within a page, that is, within a component tree. If you want to share state data at the application level or across multiple pages, you would need to apply application-level state management. ArkTS provides a wide variety of application state management capabilities: + + +- [LocalStorage](arkts-localstorage.md): API for storing the UI state, usually used for state sharing within a [UIAbility](https://gitee.com/openharmony/docs/blob/master/en/application-dev/reference/apis/js-apis-app-ability-uiAbility.md) or between pages. + +- [AppStorage](arkts-appstorage.md): special, singleton LocalStorage object within the application, which is created by the UI framework at application startup and provides the central storage for application UI state attributes. + +- [PersistentStorage](arkts-persiststorage.md): API for persisting application attributes. It is usually used together with AppStorage to persist selected AppStorage attributes to the disk so that their values are the same upon application re-start as they were when the application was closed. + +- [Environment](arkts-environment.md): a range of environment parameters regarding the device where the application runs. The environment parameters are synchronized to the AppStorage and can be used together with the AppStorage. 
diff --git a/en/application-dev/quick-start/arkts-appstorage.md b/en/application-dev/quick-start/arkts-appstorage.md new file mode 100644 index 0000000000000000000000000000000000000000..1bfc8360ff33c8377d1e5d476d0e2de777ee0f72 --- /dev/null +++ b/en/application-dev/quick-start/arkts-appstorage.md @@ -0,0 +1,204 @@ +# AppStorage: Application-wide UI State Storage + + +AppStorage provides the central storage for mutable application UI state attributes. It is bound to the application process and is created by the UI framework at application startup. + + +Unlike LocalStorage, which is usually used for page-level state sharing, AppStorage enables application-wide UI state sharing. AppStorage is equivalent to the hub of the entire application. [PersistentStorage](arkts-persiststorage.md) and [Environment](arkts-environment.md) data is passed first to AppStorage and then from AppStorage to the UI component. + + +This topic describes only the AppStorage application scenarios and related decorators: \@StorageProp and \@StorageLink. + + +## Overview + +AppStorage is a singleton LocalStorage object that is created by the UI framework at application startup. Its purpose is to provide the central storage for mutable application UI state attributes. AppStorage retains all those attributes and their values as long as the application remains running. Attributes are accessed using a unique key string value. + +UI components synchronize application state attributes with the AppStorage. Implementation of application business logic can access AppStorage as well. + +Selected state attributes of AppStorage can be synched with different data sources or data sinks. Those data sources and sinks can be on a local or remote device, and have different capabilities, such as data persistence (see [PersistentStorage](arkts-persiststorage.md)). These data sources and sinks are implemented in the business logic, separate from the UI. 
Link those AppStorage attributes to [@StorageProp](#storageprop) and [@StorageLink](#storagelink) whose values should be kept until application re-start. + + +## \@StorageProp + +As mentioned above, if you want to establish a binding between AppStorage and a custom component, you need to use the \@StorageProp and \@StorageLink decorators. Use \@StorageProp(key) or \@StorageLink(key) to decorate variables in the component. **key** identifies the attribute in AppStorage. + +When a custom component is initialized, the \@StorageProp(key)/\@StorageLink(key) decorated variable is initialized with the value of the attribute with the given key in AppStorage. Local initialization is mandatory. If an attribute with the given key is missing from AppStorage, it will be added with the stated initializing value. (Whether the attribute with the given key exists in AppStorage depends on the application logic.) + + +By decorating a variable with \@StorageProp(key), a one-way data synchronization is established with the attribute with the given key in AppStorage. A local change can be made, but it will not be synchronized to AppStorage. An update to the attribute with the given key in AppStorage will overwrite local changes. + + +### Rules of Use + +| \@StorageProp Decorator| Description | +| ------------------ | ---------------------------------------- | +| Decorator parameters | **key**: constant string, mandatory (note, the string is quoted) | +| Allowed variable types | Object, class, string, number, Boolean, enum, and array of these types. For details about the scenarios of nested objects, see [Observed Changes and Behavior](#observed-changes-and-behavior).
The type must be specified and must be the same as the corresponding attribute in AppStorage. **any** is not supported. The **undefined** and **null** values are not allowed.| +| Synchronization type | One-way: from the attribute in AppStorage to the component variable.
The component variable can be changed locally, but an update from AppStorage will overwrite local changes.| +| Initial value for the decorated variable | Mandatory. It is used as the default value for initialization if the attribute does not exist in AppStorage.| + + +### Variable Transfer/Access Rules + +| Transfer/Access | Description | +| ---------- | ---------------------------------------- | +| Initialization and update from the parent component| Forbidden.| +| Subnode initialization | Supported; can be used to initialize a n \@State, \@Link, \@Prop, or \@Provide decorated variable in the child component.| +| Access | None. | + + + **Figure 1** \@StorageProp initialization rule + + +![en-us_image_0000001552978157](figures/en-us_image_0000001552978157.png) + + +### Observed Changes and Behavior + +**Observed Changes** + + +- When the decorated variable is of the Boolean, string, or number type, its value change can be observed. + +- When the decorated variable is of the class or Object type, its value change and value changes of all its attributes, that is, the attributes that **Object.keys(observedObject)** returns. + +- When the decorated variable is of the array type, the addition, deletion, and updates of array items can be observed. + + +**Framework Behavior** + + +- When the value change of the \@StorageProp(key) decorated variable is observed, the change is not synchronized to the attribute with the give key value in AppStorage. + +- The value change of the \@StorageProp(key) decorated variable only applies to the private member variables of the current component, but not other variables bound to the key. + +- When the data decorated by \@StorageProp(key) is a state variable, the change of the data is not synchronized to AppStorage, but the owning custom component is re-rendered. 
+ +- When the attribute with the given key in AppStorage is updated, the change is synchronized to all the \@StorageProp(key) decorated data, and the local changes of the data are overwritten. + + +## \@StorageLink + +\@StorageLink(key) creates a two-way data synchronization with the attribute with the given key in AppStorage. + +1. If a local change occurs, it is synchronized to AppStorage. + +2. Changes in AppStorage are synchronized to all attributes with the given key, including one-way bound variables (\@StorageProp decorated variables and one-way bound variables created through \@Prop), two-way bound variables (\@StorageLink decorated variables and two-way bound variables created through \@Link), and other instances (such as PersistentStorage). + + +### Rules of Use + +| \@StorageLink Decorator| Description | +| ------------------ | ---------------------------------------- | +| Decorator parameters | **key**: constant string, mandatory (note, the string is quoted) | +| Allowed variable types | Object, class, string, number, Boolean, enum, and array of these types. For details about the scenarios of nested objects, see [Observed Changes and Behavior](#observed-changes-and-behavior).
The type must be specified and must be the same as the corresponding attribute in AppStorage. **any** is not supported. The **undefined** and **null** values are not allowed.| +| Synchronization type | Two-way: from the attribute in AppStorage to the custom component variable and back| +| Initial value for the decorated variable | Mandatory. It is used as the default value for initialization if the attribute does not exist in AppStorage.| + + +### Variable Transfer/Access Rules + +| Transfer/Access | Description | +| ---------- | ---------------------------------------- | +| Initialization and update from the parent component| Forbidden. | +| Subnode initialization | Supported; can be used to initialize a regular variable or \@State, \@Link, \@Prop, or \@Provide decorated variable in the child component.| +| Access | None. | + + + **Figure 2** \@StorageLink initialization rule + + +![en-us_image_0000001501938718](figures/en-us_image_0000001501938718.png) + + +### Observed Changes and Behavior + +**Observed Changes** + + +- When the decorated variable is of the Boolean, string, or number type, its value change can be observed. + +- When the decorated variable is of the class or Object type, its value change and value changes of all its attributes, that is, the attributes that **Object.keys(observedObject)** returns. + +- When the decorated variable is of the array type, the addition, deletion, and updates of array items can be observed. + + +**Framework Behavior** + + +1. When the value change of the \@StorageLink(key) decorated variable is observed, the change is synchronized to the attribute with the give key value in AppStorage. + +2. Once the attribute with the given key in AppStorage is updated, all the data (including \@StorageLink and \@StorageProp decorated variables) bound to the attribute key is changed synchronously. + +3. 
When the data decorated by \@StorageLink(key) is a state variable, the change of the data is synchronized to AppStorage, and the owning custom component is re-rendered. + + +## Application Scenarios + + +### Example of Using AppStorage and LocalStorage from Application Logic + +Since AppStorage is a singleton, its APIs are all static ones. How these APIs work resembles the non-static APIs of LocalStorage. + + +```ts +AppStorage.SetOrCreate('PropA', 47); + +let storage: LocalStorage = new LocalStorage({ 'PropA': 17 }); +let propA: number = AppStorage.Get('PropA') // propA in AppStorage == 47, propA in LocalStorage == 17 +var link1: SubscribedAbstractProperty = AppStorage.Link('PropA'); // link1.get() == 47 +var link2: SubscribedAbstractProperty = AppStorage.Link('PropA'); // link2.get() == 47 +var prop: SubscribedAbstractProperty = AppStorage.Prop('PropA'); // prop.get() = 47 + +link1.set(48); // two-way sync: link1.get() == link2.get() == prop.get() == 48 +prop.set(1); // one-way sync: prop.get()=1; but link1.get() == link2.get() == 48 +link1.set(49); // two-way sync: link1.get() == link2.get() == prop.get() == 49 + +storage.get('PropA') // == 17 +storage.set('PropA', 101); +storage.get('PropA') // == 101 + +AppStorage.Get('PropA') // == 49 +link1.get() // == 49 +link2.get() // == 49 +prop.get() // == 49 +``` + + +### Example of Using AppStorage and LocalStorage from Inside the UI + +\@StorageLink works together with the AppStorage in the same way as \@LocalStorageLink works together with LocalStorage. It creates two-way data synchronization with an attribute in AppStorage. 
+ + +```ts +AppStorage.SetOrCreate('PropA', 47); +let storage = new LocalStorage({ 'PropA': 48 }); + +@Entry(storage) +@Component +struct CompA { + @StorageLink('PropA') storLink: number = 1; + @LocalStorageLink('PropA') localStorLink: number = 1; + + build() { + Column({ space: 20 }) { + Text(`From AppStorage ${this.storLink}`) + .onClick(() => this.storLink += 1) + + Text(`From LocalStorage ${this.localStorLink}`) + .onClick(() => this.localStorLink += 1) + } + } +} +``` + + +## Restrictions + +When using AppStorage together with [PersistentStorage](arkts-persiststorage.md) and [Environment](arkts-environment.md), pay attention to the following: + +- A call to **PersistentStorage.PersistProp()** after creating the attribute in AppStorage uses the type and value in AppStorage and overwrites any attribute with the same name in PersistentStorage. In light of this, the opposite order of calls is recommended. For an example of incorrect usage, see [Accessing Attribute in AppStorage Before PersistentStorage](arkts-persiststorage.md#accessing-attribute-in-appstorage-before-persistentstorage). + +- A call to **Environment.EnvProp()** after creating the attribute in AppStorage will fail. This is because AppStorage already has an attribute with the same name, and the environment variable will not be written into AppStorage. Therefore, you are advised not to use the preset environment variable name in AppStorage. + diff --git a/en/application-dev/quick-start/arkts-basic-syntax-overview.md b/en/application-dev/quick-start/arkts-basic-syntax-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..59dd8e9e0ebb02f4f76b59985fe45e2e6b1f4cdd --- /dev/null +++ b/en/application-dev/quick-start/arkts-basic-syntax-overview.md @@ -0,0 +1,40 @@ +# Basic Syntax Overview + + +With a basic understanding of the ArkTS language, let's look into the basic composition of ArkTS through an example. 
As shown below, when the user clicks the button, the text content changes from **Hello World** to **Hello ArkUI**. + + + **Figure 1** Example effect drawing + +![Video_2023-03-06_152548](figures/Video_2023-03-06_152548.gif) + + +In this example, the basic composition of ArkTS is as follows. + + + **Figure 2** Basic composition of ArkTS + +![arkts-basic-grammar](figures/arkts-basic-grammar.png) + + +- Decorator: design pattern used to decorate classes, structures, methods, and variables to assign special meanings to them. In the preceding sample code, \@Entry, \@Component, and \@State are decorators. \@Component indicates a custom component, \@Entry indicates that the custom component is an entry component, and \@State indicates a state variable in the component, whose change will trigger the UI to re-render. + +- [UI description](arkts-declarative-ui-description.md): declarative description of the UI structure, such as the code block of the **build()** method. + +- [Custom component](arkts-create-custom-components.md): reusable UI unit, which can be combined with other components, such as the struct **Hello** decorated by @Component. + +- Built-in component: default basic or container component preset in ArkTS, which can be directly invoked, such as** \**,** \**, **\**, and **\