diff --git a/metal/paddle-mobile/paddle-mobile/API/Net.swift b/metal/paddle-mobile/paddle-mobile/API/Net.swift index fadc6fb60c0c9e68c275df7c784ff4b82831450b..aa6b43e9bfb9a10e2b9b7f598c63684b643cddc5 100644 --- a/metal/paddle-mobile/paddle-mobile/API/Net.swift +++ b/metal/paddle-mobile/paddle-mobile/API/Net.swift @@ -59,6 +59,9 @@ import Foundation /// 是否使用 MetalPerformanceShaders 进行运算, 运算精度为 32 位时不支持开启 MPS @objc public var useMPS: Bool = false + /// 是否使用最高等级的加速策略 + @objc public var useAggressiveOptimization: Bool = false + /// 模型精度 @objc public var paramPrecision: Precision = .Float32 diff --git a/metal/paddle-mobile/paddle-mobile/API/Runner.swift b/metal/paddle-mobile/paddle-mobile/API/Runner.swift index c2f6521075ae4a9b6b2907d57f24d68a17ca09da..730acd59478e1936c76efaaa355003dd0a8c9cad 100644 --- a/metal/paddle-mobile/paddle-mobile/API/Runner.swift +++ b/metal/paddle-mobile/paddle-mobile/API/Runner.swift @@ -95,6 +95,7 @@ import Foundation initContext.metalLoadMode = net.metalLoadMode initContext.metalLibPath = net.metalLibPath initContext.useMPS = net.useMPS + initContext.useAggresiveOptimization = net.useAggressiveOptimization switch net.paramPrecision { case .Float16: diff --git a/metal/paddle-mobile/paddle-mobile/Src/Operators/Base/Operator.swift b/metal/paddle-mobile/paddle-mobile/Src/Operators/Base/Operator.swift index 32f044c53edf7d4ef7cd8afa13453aae1066fc76..85474cb5a9486b1fc53e544e3c690e443a93dc6e 100644 --- a/metal/paddle-mobile/paddle-mobile/Src/Operators/Base/Operator.swift +++ b/metal/paddle-mobile/paddle-mobile/Src/Operators/Base/Operator.swift @@ -70,6 +70,9 @@ public class InitContext { /// 是否使用 MetalPerformanceShaders 进行运算 var useMPS: Bool = false + /// 是否使用最高等级的加速策略 + var useAggresiveOptimization: Bool = false + init() { metalLoadMode = .LoadMetalInDefaultLib metalLibPath = nil diff --git a/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddKernel.swift b/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddKernel.swift 
index 155a5b7841c6989814b5f90ce8153a401723eac8..e4fa5b1d67ad0ffc88bfe934cc24d3cb9f14e869 100644 --- a/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddKernel.swift +++ b/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddKernel.swift @@ -110,9 +110,11 @@ class ConvAddKernel: Kernel, Computable { } var shouldUseMPS = false - let functionName = type(of: self).kernelFunctionName(param: param) - if #available(iOS 11.0, *), initContext.useMPS { - shouldUseMPS = true + let functionName = type(of: self).kernelFunctionName(param: param, useAggressiveOptimization: initContext.useAggresiveOptimization) + if #available(iOS 11.0, *), (initContext.useMPS || initContext.useAggresiveOptimization) { + if (param.input.tensorDim[1] == 1 || param.input.tensorDim[1] > 4) && (param.output.tensorDim[1] == 1 || param.output.tensorDim[1] > 4) { + shouldUseMPS = true + } } if type(of: self).isWinoGrad(functionName: functionName) { shouldUseMPS = false @@ -121,7 +123,6 @@ class ConvAddKernel: Kernel, Computable { super.init(device: device, inFunctionName: nil, initContext: initContext) setupWithMPS(device: device, param: param) } else { - if functionName == nil { fatalError(" unsupport yet ") } @@ -203,7 +204,7 @@ class ConvAddKernel: Kernel, Computable { param.y.initBuffer(device: device, precision: GlobalConfig.shared.computePrecision) } - open class func kernelFunctionName(param: ConvAddParam
<P>
) -> String? { + open class func kernelFunctionName(param: ConvAddParam<P>
, useAggressiveOptimization: Bool = false) -> String? { if GlobalConfig.shared.computePrecision == .Float16 { if param.filter.width == 1 && param.filter.height == 1 { return "conv_add_1x1_half" diff --git a/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddReluKernel.swift b/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddReluKernel.swift index 13843fd846807d69b209fdb74828d9d78c20d129..fc43a6c17efbe250047eb4217873eb652e43cae5 100644 --- a/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddReluKernel.swift +++ b/metal/paddle-mobile/paddle-mobile/Src/Operators/Kernels/ConvAddReluKernel.swift @@ -10,16 +10,20 @@ import Foundation import MetalPerformanceShaders class ConvAddReluKernel: ConvAddKernel
<P>
{ - override class func kernelFunctionName(param: ConvAddParam<P>
) -> String? { + override class func kernelFunctionName(param: ConvAddParam<P>
, useAggressiveOptimization: Bool = false) -> String? { if GlobalConfig.shared.computePrecision == .Float16 { if param.filter.width == 1 && param.filter.height == 1 { return "conv_add_relu_1x1_half" } else if param.filter.channel == 1 && param.filter.n == param.input.tensorDim[1] { - if param.filter.width == 3 && param.filter.height == 3 && param.stride[0] == 1 && param.stride[1] == 1 && param.filter.n == 16 { - return "depthwise_conv_add_relu_3x3_half_winograd" - } else { - return "depthwise_conv_add_relu_3x3_half" + if useAggressiveOptimization { + let couldUseWinograd = param.filter.width == 3 && param.filter.height == 3 + && param.filter.n == 16 && param.stride[0] == 1 && param.stride[1] == 1 + && param.dilations[0] == 1 && param.dilations[1] == 1 + if couldUseWinograd { + return "depthwise_conv_add_relu_3x3_half_winograd" + } } + return "depthwise_conv_add_relu_3x3_half" } else if param.filter.width == 3 && param.filter.height == 3 { return "conv_add_relu_3x3_half" } else if param.filter.width == 1 && param.filter.height == 5 {