Commit 15d6373d authored by DCloud_LXH

feat(app-plus): createInnerAudioContext

Parent b611bf16
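This commit wires `createInnerAudioContext` into the app-plus platform, backed by `plus.audio.createPlayer`. A minimal usage sketch of the resulting API, following the documented uni-app surface (the audio URL is a placeholder):

```ts
// Hedged usage sketch of the API this commit adds for app-plus.
// The URL is a placeholder; the event methods match innerAudioContextEventNames below.
const ctx = uni.createInnerAudioContext()
ctx.src = 'https://example.com/audio.mp3'
ctx.autoplay = true
ctx.onPlay(() => {
  console.log('playback started')
})
ctx.onTimeUpdate(() => {
  console.log('currentTime (s):', ctx.currentTime)
})
ctx.onError((res) => {
  console.log('error:', res.errMsg, res.errCode)
})
```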
......@@ -6,6 +6,7 @@ export * from './service/base/eventBus'
export * from './service/context/createVideoContext'
export * from './service/context/createMapContext'
export * from './service/context/canvas'
export * from './service/context/innerAudio'
export * from './service/ui/createIntersectionObserver'
export * from './service/ui/createMediaQueryObserver'
......
//#region types
export type InnerAudioContextEvent =
| 'onCanplay'
| 'onPlay'
| 'onPause'
| 'onStop'
| 'onEnded'
| 'onTimeUpdate'
| 'onError'
| 'onWaiting'
| 'onSeeking'
| 'onSeeked'
export type InnerAudioContextOff =
| 'offCanplay'
| 'offPlay'
| 'offPause'
| 'offStop'
| 'offEnded'
| 'offTimeUpdate'
| 'offError'
| 'offWaiting'
| 'offSeeking'
| 'offSeeked'
//#endregion
/**
* Listener events that can be registered in batch
*/
export const innerAudioContextEventNames: InnerAudioContextEvent[] = [
'onCanplay',
'onPlay',
'onPause',
'onStop',
'onEnded',
'onTimeUpdate',
'onError',
'onWaiting',
'onSeeking',
'onSeeked',
]
export const innerAudioContextOffEventNames: InnerAudioContextOff[] = [
'offCanplay',
'offPlay',
'offPause',
'offStop',
'offEnded',
'offTimeUpdate',
'offError',
'offWaiting',
'offSeeking',
'offSeeked',
]
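The two lists pair one-to-one: each `onX` listener name has a matching `offX` removal name. A hypothetical helper, not part of the commit, that derives one from the other:

```ts
// Hypothetical helper (not in the commit): derive the "off" name from an
// "on" name by swapping the prefix, e.g. 'onPlay' -> 'offPlay'.
const toOffEventName = (name: InnerAudioContextEvent): InnerAudioContextOff =>
  `off${name.slice(2)}` as InnerAudioContextOff
```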
export function getBaseSystemInfo() {
return {}
}
export function getRealPath() {}
export { getRealPath } from './getRealPath'
export function operateVideoPlayer() {}
export function operateMap() {}
export function requestComponentInfo() {}
......
import { extend } from '@vue/shared'
import { getRealPath } from '@dcloudio/uni-platform'
import {
API_CREATE_INNER_AUDIO_CONTEXT,
defineSyncApi,
} from '@dcloudio/uni-api'
import type { API_TYPE_CREATEE_INNER_AUDIO_CONTEXT } from '@dcloudio/uni-api'
import { once } from '@dcloudio/uni-shared'
import {
InnerAudioContextEvent,
innerAudioContextEventNames,
innerAudioContextOffEventNames,
} from '@dcloudio/uni-api'
type ExtendAudio = {
src?: string
volume?: number
startTime?: number
isStopped?: boolean
initStateChange?: boolean
}
type Audio = PlusAudioAudioPlayer & ExtendAudio
type AudioEvents = NonNullable<
Parameters<PlusAudioAudioPlayer['addEventListener']>[0]
>
type OperationType = 'play' | 'pause' | 'stop' | 'seek'
const audios: Record<string, Audio> = {}
const evts: AudioEvents[] = [
'play',
'canplay',
'ended',
'stop',
'waiting',
'seeking',
'seeked',
'pause',
]
const initStateChange = (audioId: string) => {
const audio = audios[audioId]
if (!audio) {
return
}
if (!audio.initStateChange) {
audio.initStateChange = true
audio.addEventListener('error', (error) => {
onAudioStateChange({
state: 'error',
audioId,
errMsg: 'MediaError',
errCode: error.code,
})
})
evts.forEach((event) => {
audio.addEventListener(event, () => {
// The isStopped flag works around incorrect playback progress being reported on Android devices after playback has stopped
if (event === 'play') {
audio.isStopped = false
} else if (event === 'stop') {
audio.isStopped = true
}
onAudioStateChange({
state: event,
audioId,
})
})
})
}
}
function createAudioInstance() {
const audioId = `${Date.now()}${Math.random()}`
const audio: Audio = (audios[audioId] = plus.audio.createPlayer())
audio.src = ''
audio.volume = 1
audio.startTime = 0
return {
errMsg: 'createAudioInstance:ok',
audioId,
}
}
function setAudioState({
audioId,
src,
startTime,
autoplay = false,
loop = false,
obeyMuteSwitch,
volume,
}: {
audioId: string
autoplay?: boolean
loop?: boolean
obeyMuteSwitch?: boolean
src?: string
startTime?: number
volume?: number
}) {
const audio = audios[audioId]
if (audio) {
const style: { loop: boolean; autoplay: boolean } & ExtendAudio = {
loop,
autoplay,
}
if (src) {
audio.src = style.src = getRealPath(src)
}
if (startTime) {
audio.startTime = style.startTime = startTime
}
if (typeof volume === 'number') {
audio.volume = style.volume = volume
}
audio.setStyles(style)
initStateChange(audioId)
}
return {
errMsg: 'setAudioState:ok',
}
}
function getAudioState({ audioId }: { audioId: string }) {
const audio = audios[audioId]
if (!audio) {
return {
errMsg: 'getAudioState:fail',
}
}
const { src, startTime, volume } = audio
return {
errMsg: 'getAudioState:ok',
duration: 1e3 * (audio.getDuration() || 0),
currentTime: audio.isStopped ? 0 : 1e3 * audio.getPosition(),
paused: audio.isPaused(),
src,
volume,
startTime: 1e3 * startTime!,
buffered: 1e3 * audio.getBuffered(),
}
}
function operateAudio({
operationType,
audioId,
currentTime,
}: {
operationType: OperationType
audioId: string
currentTime?: number
}) {
const audio = audios[audioId]
switch (operationType) {
case 'play':
case 'pause':
case 'stop':
audio[
operationType === 'play' && audio.isPaused() ? 'resume' : operationType
]()
break
case 'seek':
if (typeof currentTime !== 'undefined') audio.seekTo(currentTime / 1e3)
break
}
return {
errMsg: 'operateAudio:ok',
}
}
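Note the unit and aliasing conventions here: `operateAudio` takes `currentTime` in milliseconds and converts it to seconds for `seekTo`, and a `play` request transparently becomes `resume` when the player is merely paused. A hypothetical call for illustration (the id is a placeholder returned by `createAudioInstance`):

```ts
// Hypothetical illustration, not part of the commit: seek to the 3-second mark.
// currentTime is passed in milliseconds; operateAudio divides by 1e3 for seekTo().
operateAudio({
  operationType: 'seek',
  audioId: 'someAudioId', // placeholder
  currentTime: 3000,
})
```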
const innerAudioContexts: Record<string, InnerAudioContext> =
Object.create(null)
const onAudioStateChange = ({
state,
audioId,
errMsg,
errCode,
}: {
state: AudioEvents
audioId: string
errMsg?: string
errCode?: unknown
}) => {
const audio = innerAudioContexts[audioId]
if (audio) {
emit(audio, state, errMsg, errCode)
if (state === 'play') {
const oldCurrentTime = audio.currentTime
audio.__timing = setInterval(() => {
const currentTime = audio.currentTime
if (currentTime !== oldCurrentTime) {
emit(audio, 'timeupdate' as any)
}
}, 200)
} else if (state === 'pause' || state === 'stop' || state === 'error') {
clearInterval(audio.__timing)
}
}
}
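`timeupdate` is absent from the native `evts` list above, so progress events are synthesized by polling every 200 ms while the audio is playing, and the timer is cleared again on pause, stop, or error. A standalone sketch of the same pattern, under the assumption that the native player emits no timeupdate of its own:

```ts
// Standalone sketch of the 200 ms progress-polling pattern used above
// (assumption: the native player has no timeupdate event of its own).
function startTimeUpdatePolling(
  getPosition: () => number,
  onTimeUpdate: (position: number) => void,
  intervalMs = 200
): () => void {
  let last = getPosition()
  const timer = setInterval(() => {
    const position = getPosition()
    if (position !== last) {
      last = position
      onTimeUpdate(position)
    }
  }, intervalMs)
  // The caller clears the timer on pause/stop/error, mirroring the branch above.
  return () => clearInterval(timer)
}
```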
const props = [
{
name: 'src',
cache: true,
},
{
name: 'startTime',
default: 0,
cache: true,
},
{
name: 'autoplay',
default: false,
cache: true,
},
{
name: 'loop',
default: false,
cache: true,
},
{
name: 'obeyMuteSwitch',
default: true,
readonly: true,
cache: true,
},
{
name: 'duration',
readonly: true,
},
{
name: 'currentTime',
readonly: true,
},
{
name: 'paused',
readonly: true,
},
{
name: 'buffered',
readonly: true,
},
{
name: 'volume',
},
]
class InnerAudioContext implements UniApp.InnerAudioContext {
/**
* Length of the current audio in seconds; only available once a valid src is set
*/
'duration': UniApp.InnerAudioContext['duration']
/**
* Position where playback starts, in seconds
*/
'startTime': UniApp.InnerAudioContext['startTime']
/**
* Current playback position in seconds; only available once a valid src is set
*/
'currentTime': UniApp.InnerAudioContext['currentTime']
/**
* Whether playback is paused or stopped: true means paused or stopped, false means playing
*/
'paused': UniApp.InnerAudioContext['paused']
/**
* Data URL of the audio, used for playback
*/
'src': UniApp.InnerAudioContext['src']
/**
* Buffered time point; only content between the current playback position and this point is guaranteed to be buffered
*/
'buffered': UniApp.InnerAudioContext['buffered']
/**
* Whether playback starts automatically; default false
*/
'autoplay': UniApp.InnerAudioContext['autoplay']
/**
* Whether playback loops; default false
*/
'loop': UniApp.InnerAudioContext['loop']
/**
* Whether to obey the system mute switch; when false, audio keeps playing even if the user has switched mute on; default true
*/
'obeyMuteSwitch': UniApp.InnerAudioContext['obeyMuteSwitch']
/**
* Volume, in the range 0–1
*/
'volume': UniApp.InnerAudioContext['volume']
/**
* Registered event listeners
*/
_callbacks: Partial<Record<InnerAudioContextEvent, Array<Function>>>
/**
* Id of the current Audio instance
*/
id: string
/**
* Timer used by the current Audio instance for timeupdate polling
*/
__timing?: number
_options: Data
constructor(id: string) {
this.id = id
this._callbacks = {}
this._options = {}
// Initialize the event listener lists
innerAudioContextEventNames.forEach((eventName) => {
this._callbacks[eventName] = []
})
props.forEach((item) => {
const name = item.name
Object.defineProperty(this, name, {
get: () => {
const result = item.cache
? this._options
: getAudioState({
audioId: this.id,
})
const value = name in result ? result[name] : item.default
return typeof value === 'number' && name !== 'volume'
? value / 1e3
: value
},
set: item.readonly
? undefined
: (value) => {
this._options[name] = value
setAudioState(
extend({}, this._options, {
audioId: this.id,
})
)
},
})
})
initInnerAudioContextEventOnce()
}
play() {
this._operate('play')
}
pause() {
this._operate('pause')
}
stop() {
this._operate('stop')
}
seek(position: number) {
this._operate('seek', {
currentTime: position * 1e3,
})
}
destroy() {
clearInterval(this.__timing)
if (audios[this.id]) {
audios[this.id].close()
delete audios[this.id]
}
delete innerAudioContexts[this.id]
}
_operate(type: OperationType, options?: Data) {
operateAudio(
extend({}, options, {
audioId: this.id,
operationType: type,
})
)
}
'onCanplay': UniApp.InnerAudioContext['onCanplay']
'onPlay': UniApp.InnerAudioContext['onPlay']
'onPause': UniApp.InnerAudioContext['onPause']
'onStop': UniApp.InnerAudioContext['onStop']
'onEnded': UniApp.InnerAudioContext['onEnded']
'onTimeUpdate': UniApp.InnerAudioContext['onTimeUpdate']
'onError': UniApp.InnerAudioContext['onError']
'onWaiting': UniApp.InnerAudioContext['onWaiting']
'onSeeking': UniApp.InnerAudioContext['onSeeking']
'onSeeked': UniApp.InnerAudioContext['onSeeked']
'offCanplay': UniApp.InnerAudioContext['offCanplay']
'offPlay': UniApp.InnerAudioContext['offPlay']
'offPause': UniApp.InnerAudioContext['offPause']
'offStop': UniApp.InnerAudioContext['offStop']
'offEnded': UniApp.InnerAudioContext['offEnded']
'offTimeUpdate': UniApp.InnerAudioContext['offTimeUpdate']
'offError': UniApp.InnerAudioContext['offError']
'offWaiting': UniApp.InnerAudioContext['offWaiting']
'offSeeking': UniApp.InnerAudioContext['offSeeking']
'offSeeked': UniApp.InnerAudioContext['offSeeked']
}
const initInnerAudioContextEventOnce = /*#__PURE__*/ once(() => {
// Register the audio context "on" event-listener methods in batch
innerAudioContextEventNames.forEach((eventName) => {
InnerAudioContext.prototype[eventName] = function (callback: Function) {
if (typeof callback === 'function') {
this._callbacks[eventName]!.push(callback)
}
}
})
// Register the audio context "off" (remove-listener) methods in batch
innerAudioContextOffEventNames.forEach((eventName) => {
InnerAudioContext.prototype[eventName] = function (callback: Function) {
// 'offX' removes from the matching 'onX' list, since _callbacks is keyed by on-names
const onEventName = eventName.replace('off', 'on') as InnerAudioContextEvent
const callbacks = this._callbacks[onEventName]!
const index = callbacks.indexOf(callback)
if (index >= 0) {
callbacks.splice(index, 1)
}
}
})
})
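`initInnerAudioContextEventOnce` is wrapped in `once` from `@dcloudio/uni-shared`, so the prototype methods are attached a single time no matter how many contexts get created. A sketch of the `once` semantics assumed here (the real helper lives in uni-shared):

```ts
// Sketch of the assumed once() semantics; the actual helper is in @dcloudio/uni-shared.
// The wrapped function runs on the first call only; later calls return the cached result.
function once<T extends (...args: any[]) => any>(fn: T): T {
  let called = false
  let result: ReturnType<T>
  return function (this: unknown, ...args: Parameters<T>) {
    if (!called) {
      called = true
      result = fn.apply(this, args)
    }
    return result
  } as T
}
```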
function emit(
audio: InnerAudioContext,
state: AudioEvents,
errMsg?: string,
errCode?: unknown
) {
const name = `on${
state[0].toUpperCase() + state.slice(1)
}` as InnerAudioContextEvent
audio._callbacks[name]!.forEach((callback) => {
if (typeof callback === 'function') {
callback(
state === 'error'
? {
errMsg,
errCode,
}
: {}
)
}
})
}
/**
* Create an inner audio context
*/
export const createInnerAudioContext =
defineSyncApi<API_TYPE_CREATEE_INNER_AUDIO_CONTEXT>(
API_CREATE_INNER_AUDIO_CONTEXT,
() => {
const { audioId } = createAudioInstance()
const innerAudioContext = new InnerAudioContext(audioId)
innerAudioContexts[audioId] = innerAudioContext
return innerAudioContext
}
)
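With the prototype methods in place, a listener registered through an `onX` method can be removed through the matching `offX` method by reference equality, and `destroy` releases the native player. A hypothetical round trip for illustration:

```ts
// Hypothetical illustration: register, remove, then tear down.
const audioCtx = uni.createInnerAudioContext()
const onEnded = () => console.log('playback finished')
audioCtx.onEnded(onEnded) // pushed into _callbacks['onEnded']
audioCtx.offEnded(onEnded) // spliced back out by reference equality
audioCtx.destroy() // closes the plus.audio player and clears the polling timer
```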
export * from './file/getFileInfo'
export * from './device/compass'
export * from './device/vibrate'
export * from './device/accelerometer'
export * from './media/getImageInfo'
export * from './media/getVideoInfo'
export * from './keyboard/keyboard'
export * from './network/downloadFile'
export * from './context/createInnerAudioContext'
......@@ -3707,6 +3707,30 @@ const canvasToTempFilePath = /* @__PURE__ */ defineAsyncApi(API_CANVAS_TO_TEMP_F
callbackId: cId
});
}, CanvasToTempFilePathProtocol, CanvasToTempFilePathOptions);
const innerAudioContextEventNames = [
"onCanplay",
"onPlay",
"onPause",
"onStop",
"onEnded",
"onTimeUpdate",
"onError",
"onWaiting",
"onSeeking",
"onSeeked"
];
const innerAudioContextOffEventNames = [
"offCanplay",
"offPlay",
"offPause",
"offStop",
"offEnded",
"offTimeUpdate",
"offError",
"offWaiting",
"offSeeking",
"offSeeked"
];
const defaultOptions = {
thresholds: [0],
initialRatio: 0,
......@@ -4834,8 +4858,7 @@ const API_SHOW_TOAST = "showToast";
const SHOW_TOAST_ICON = [
"success",
"loading",
"none",
"error"
"none"
];
const ShowToastProtocol = {
title: String,
......@@ -15048,30 +15071,6 @@ var MapControl = /* @__PURE__ */ defineSystemComponent({
};
}
});
const innerAudioContextEventNames = [
"onCanplay",
"onPlay",
"onPause",
"onStop",
"onEnded",
"onTimeUpdate",
"onError",
"onWaiting",
"onSeeking",
"onSeeked"
];
const innerAudioContextOffEventNames = [
"offCanplay",
"offPlay",
"offPause",
"offStop",
"offEnded",
"offTimeUpdate",
"offError",
"offWaiting",
"offSeeking",
"offSeeked"
];
const initInnerAudioContextEventOnce = /* @__PURE__ */ once(() => {
innerAudioContextEventNames.forEach((eventName) => {
InnerAudioContext.prototype[eventName] = function(callback) {
......@@ -15093,7 +15092,6 @@ const initInnerAudioContextEventOnce = /* @__PURE__ */ once(() => {
class InnerAudioContext {
constructor() {
this._src = "";
initInnerAudioContextEventOnce();
var audio = this._audio = new Audio();
this._stoping = false;
const propertys = [
......@@ -15165,6 +15163,7 @@ class InnerAudioContext {
});
}, false);
});
initInnerAudioContextEventOnce();
}
play() {
this._stoping = false;
......@@ -17446,24 +17445,11 @@ var Toast = /* @__PURE__ */ defineComponent({
}
});
function useToastIcon(props2) {
const Icon = computed(() => {
switch (props2.icon) {
case "success":
return createVNode(createSvgIconVNode(ICON_PATH_SUCCESS_NO_CIRCLE, "#fff", 38), {
class: ToastIconClassName
});
case "error":
return createVNode(createSvgIconVNode(ICON_PATH_WARN, "#fff", 38), {
class: ToastIconClassName
});
case "loading":
return createVNode("i", {
"class": [ToastIconClassName, "uni-loading"]
}, null, 2);
default:
return null;
}
});
const Icon = computed(() => props2.icon === "success" ? createVNode(createSvgIconVNode(ICON_PATH_SUCCESS_NO_CIRCLE, "#fff", 38), {
class: ToastIconClassName
}) : props2.icon === "loading" ? createVNode("i", {
"class": [ToastIconClassName, "uni-loading"]
}, null, 2) : null);
return {
Icon
};
......
......@@ -5,32 +5,13 @@ import {
} from '@dcloudio/uni-api'
import type { API_TYPE_CREATEE_INNER_AUDIO_CONTEXT } from '@dcloudio/uni-api'
import { once } from '@dcloudio/uni-shared'
import {
InnerAudioContextEvent,
innerAudioContextEventNames,
innerAudioContextOffEventNames,
} from '@dcloudio/uni-api'
//#region types
type InnerAudioContextEvent =
| 'onCanplay'
| 'onPlay'
| 'onPause'
| 'onStop'
| 'onEnded'
| 'onTimeUpdate'
| 'onError'
| 'onWaiting'
| 'onSeeking'
| 'onSeeked'
type InnerAudioContextOff =
| 'offCanplay'
| 'offPlay'
| 'offPause'
| 'offStop'
| 'offEnded'
| 'offTimeUpdate'
| 'offError'
| 'offWaiting'
| 'offSeeking'
| 'offSeeked'
type Property =
| 'src'
| 'autoplay'
......@@ -41,38 +22,8 @@ type Property =
| 'volume'
type InnerAudioProperty = keyof Pick<HTMLMediaElement, Property>
//#endregion
/**
* 可以批量设置的监听事件
*/
const innerAudioContextEventNames: InnerAudioContextEvent[] = [
'onCanplay',
'onPlay',
'onPause',
'onStop',
'onEnded',
'onTimeUpdate',
'onError',
'onWaiting',
'onSeeking',
'onSeeked',
]
const innerAudioContextOffEventNames: InnerAudioContextOff[] = [
'offCanplay',
'offPlay',
'offPause',
'offStop',
'offEnded',
'offTimeUpdate',
'offError',
'offWaiting',
'offSeeking',
'offSeeked',
]
const initInnerAudioContextEventOnce = /*#__PURE__*/ once(() => {
// Register the audio context event-listener methods in batch
innerAudioContextEventNames.forEach((eventName) => {
......@@ -159,7 +110,6 @@ class InnerAudioContext implements UniApp.InnerAudioContext {
* Audio context initialization
*/
constructor() {
initInnerAudioContextEventOnce()
var audio = (this._audio = new Audio())
this._stoping = false
// Properties that mirror the audio element's, with the same names and behavior
......@@ -248,6 +198,7 @@ class InnerAudioContext implements UniApp.InnerAudioContext {
false
)
})
initInnerAudioContextEventOnce()
}
/**
......