Commit 7db81656 authored by liuruilong

operator init texture itself

Parent 1b50a6df
......@@ -28,7 +28,7 @@ class ViewController: UIViewController {
var selectImage: UIImage?
var program: Program?
var executor: Executor<Float32>?
var modelType: SupportModel = .mobilenet
var modelType: SupportModel = SupportModel.supportedModels()[0]
var toPredictTexture: MTLTexture?
var modelHelper: Net {
return modelHelperMap[modelType] ?! " has no this type "
......
......@@ -10,7 +10,6 @@ import UIKit
import paddle_mobile
class ViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
print(" done ")
......
......@@ -81,11 +81,140 @@ extension Float32: PrecisionType {
}
}
public enum DataLayout {
case NCHW
case NHWC
// N - 0 C - 1 H - 2 W - 3
struct DataLayout {
static func NCHW(dim: Dim = Dim.init(inDim: [0, 0, 0, 0])) -> DataLayout {
return DataLayout.init([(.N, dim[0]), (.C, dim[1]), (.H, dim[2]), (.W, dim[3])])
}
static func NHWC(dim: Dim = Dim.init(inDim: [0, 0, 0, 0])) -> DataLayout {
return DataLayout.init([(.N, dim[0]), (.H, dim[1]), (.W, dim[2]), (.C, dim[3])])
}
func count() -> Int {
return layoutWithDim.count
}
var N: Int? {
get {
for layoutDim in layoutWithDim {
if layoutDim.0 == .N {
return layoutDim.1
}
}
return nil
}
set {
var newN = (Layout.N, newValue)
if let index = layoutWithDim.index(where: { (layout: Layout, dim: Int) -> Bool in
return layout == .N
}) {
fatalError()
}
}
}
var C: Int? {
get {
for layoutDim in layoutWithDim {
if layoutDim.0 == .C {
return layoutDim.1
}
}
return nil
}
set {
var newN = (Layout.C, newValue)
if let index = layoutWithDim.index(where: { (layout: Layout, dim: Int) -> Bool in
return layout == .C
}) {
fatalError()
}
}
}
var H: Int? {
get {
for layoutDim in layoutWithDim {
if layoutDim.0 == .H {
return layoutDim.1
}
}
return nil
}
set {
var newN = (Layout.H, newValue)
if let index = layoutWithDim.index(where: { (layout: Layout, dim: Int) -> Bool in
return layout == .H
}) {
fatalError()
}
}
}
var W: Int? {
get {
for layoutDim in layoutWithDim {
if layoutDim.0 == .W {
return layoutDim.1
}
}
return nil
}
set {
var newN = (Layout.W, newValue)
if let index = layoutWithDim.index(where: { (layout: Layout, dim: Int) -> Bool in
return layout == .W
}) {
fatalError()
}
}
}
init(_ inLayout: [(Layout, Int)]) {
layoutWithDim = inLayout
}
func layout() -> [Layout] {
return layoutWithDim.map({ (layout: Layout, dim: Int) -> Layout in
return layout
})
}
var layoutWithDim: [(Layout, Int)] = [(.N, 0), (.C, 0), (.H, 0), (.W, 0)]
func convertTo(inLayout: [Layout]) {
}
enum Layout: Int{
case N = 0
case C = 1
case H = 2
case W = 3
static func defaultLayout() -> [Layout] {
return [N, C, H, W]
}
}
}
extension DataLayout: Equatable {
public static func == (lhs: DataLayout, rhs: DataLayout) -> Bool {
if lhs.layoutWithDim.count == rhs.layoutWithDim.count {
var result = true
for i in 0..<lhs.layoutWithDim.count {
result = result && (lhs.layoutWithDim[i] == rhs.layoutWithDim[i])
}
return result
} else {
return false
}
}
}
protocol Variant: CustomStringConvertible, CustomDebugStringConvertible {
}
......
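Editor's note: a minimal usage sketch (not part of this commit) of the DataLayout struct introduced above, assuming the repository's Dim type. It shows that the factory methods pair each axis tag with its extent, and that the computed properties look values up by tag rather than by position.

```swift
// Hypothetical sketch exercising the DataLayout API above.
let dim = Dim.init(inDim: [1, 3, 224, 224])        // interpreted here as N, C, H, W
let layout = DataLayout.NCHW(dim: dim)             // [(.N, 1), (.C, 3), (.H, 224), (.W, 224)]

print(layout.N ?? 0)                               // 1  -- found by the .N axis tag
print(layout.C ?? 0)                               // 3
print(layout.count())                              // 4
print(layout == DataLayout.NCHW(dim: dim))         // true -- element-wise tuple comparison
```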
......@@ -161,11 +161,11 @@ public class Loader<P: PrecisionType> {
} catch let error {
throw error
}
tensor.convert(to: .NHWC)
tensor.convert(to: DataLayout.NHWC())
// tensor.initBuffer(device: device)
scope[varDesc.name] = tensor
} else {
let dim = Dim.init(inDim: tensorDesc.NHWCDim)
let dim = Dim.init(inDim: tensorDesc.dims)
scope[varDesc.name] = Texture<P>.init(device: device, inDim: dim)
}
} else {
......
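Editor's note: the loader now converts parameter tensors with tensor.convert(to: DataLayout.NHWC()). Below is a minimal sketch of the index arithmetic behind such an NCHW → NHWC copy; the helper name and signature are illustrative, not the repository's API.

```swift
// Hypothetical helper showing the element reordering a convert(to: NHWC) performs.
func nchwToNhwc(_ src: [Float32], n: Int, c: Int, h: Int, w: Int) -> [Float32] {
    var dst = [Float32](repeating: 0, count: src.count)
    for ni in 0..<n {
        for ci in 0..<c {
            for hi in 0..<h {
                for wi in 0..<w {
                    let from = ((ni * c + ci) * h + hi) * w + wi   // NCHW offset
                    let to   = ((ni * h + hi) * w + wi) * c + ci   // NHWC offset
                    dst[to] = src[from]
                }
            }
        }
    }
    return dst
}
```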
......@@ -50,6 +50,8 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
required init(device: MTLDevice, param: ConvAddBatchNormReluParam<P>) {
param.output.initTexture(device: device, transpose: [0, 2, 3, 1])
if param.filter.width == 1 && param.filter.height == 1 {
super.init(device: device, inFunctionName: "conv_add_batch_norm_relu_1x1")
} else if param.filter.channel == 1 {
......@@ -60,12 +62,12 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
param.filter.initBuffer(device: device, precision: Tensor.BufferPrecision.Float32)
param.y.initBuffer(device: device, precision: Tensor.BufferPrecision.Float32)
param.variance.initBuffer(device: device)
param.mean.initBuffer(device: device)
param.scale.initBuffer(device: device)
param.bias.initBuffer(device: device)
let offsetX = param.filter.width/2 - Int(param.paddings[0])
let offsetY = param.filter.height/2 - Int(param.paddings[1])
......
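Editor's note: the kernel also derives the texture read offsets from the filter size and padding. A quick worked example for a 3×3 filter with padding 1 (values are illustrative):

```swift
// offsetX / offsetY arithmetic from the kernel above, for a 3x3 filter with padding 1.
let filterWidth = 3, filterHeight = 3
let paddings: [Int32] = [1, 1]
let offsetX = filterWidth / 2 - Int(paddings[0])   // 0
let offsetY = filterHeight / 2 - Int(paddings[1])  // 0
print(offsetX, offsetY)
```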
......@@ -13,6 +13,7 @@
limitations under the License. */
import Foundation
import MetalPerformanceShaders
class ConvAddKernel<P: PrecisionType>: Kernel, Computable {
var metalParam: MetalConvParam!
......@@ -32,6 +33,8 @@ class ConvAddKernel<P: PrecisionType>: Kernel, Computable {
}
func compute(commandBuffer: MTLCommandBuffer, param: ConvAddParam<P>) throws {
guard let encoder = commandBuffer.makeComputeCommandEncoder() else {
throw PaddleMobileError.predictError(message: " encode is nil")
}
......
......@@ -250,3 +250,79 @@ kernel void softmax_half(texture2d_array<half, access::read> inTexture [[texture
rr = exp(rr - maxv) / sum;
outTexture.write(rr, gid.xy, gid.z);
}
kernel void prior_box(texture2d_array<float, access::read> inTexture [[texture(0)]],
texture2d_array<float, access::write> outTexture [[texture(1)]],
uint3 gid [[thread_position_in_grid]]) {
int max_sizes_size;
float max_sizes[2];
bool clip;
float img_width;
float img_height;
float step_width;
float step_height;
float offset;
float aspect_ratios[2];
int aspect_ratios_size;
float center_x = (gid.x + offset) * step_width;
float center_y = (gid.y + offset) * step_height;
float box_width, box_height;
int min_sizes_size;
float min_sizes[2];
float min_size;
float max_size;
if (gid.z < aspect_ratios_size) {
float ar = aspect_ratios[gid.z];
box_width = min_size * sqrt(ar) / 2;
box_height = min_size / sqrt(ar) / 2;
float4 box;
box.x = (center_x - box_width) / img_width;
box.y = (center_y - box_height) / img_height;
box.z = (center_x + box_width) / img_width;
box.w = (center_y + box_height) / img_height;
float4 res;
if (clip) {
res = min(max(box, 0.0), 1.0);
} else {
res = box;
}
outTexture.write(res, gid.xy, gid.z);
} else if (gid.z >= aspect_ratios_size) {
int max_index = gid.z - aspect_ratios_size;
if (max_sizes_size > 0 && min_sizes_size > 0) {
box_width = box_height = sqrt(min_size * max_size) / 2;
float4 max_box;
max_box.x = (center_x - box_width) / img_width;
max_box.y = (center_y - box_height) / img_height;
max_box.z = (center_x + box_width) / img_width;
max_box.w = (center_y + box_height) / img_height;
float4 res;
if (clip) {
res = min(max(max_box, 0.0), 1.0);
} else {
res = max_box;
}
outTexture.write(res, gid.xy, gid.z);
}
}
}
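Editor's note: the prior_box kernel above still reads its parameters (min_sizes, clip, img_width, …) from uninitialized locals, so it is clearly work in progress; the per-box math it performs is the usual SSD prior-box normalization. A plain-Swift sketch of that math, with illustrative names:

```swift
// Hypothetical Swift port of the per-thread box computation in prior_box.
func priorBox(centerX: Float, centerY: Float,
              halfWidth: Float, halfHeight: Float,
              imgWidth: Float, imgHeight: Float,
              clip: Bool) -> [Float] {
    // Corner coordinates normalized to [0, 1] relative to the input image.
    var box = [(centerX - halfWidth)  / imgWidth,
               (centerY - halfHeight) / imgHeight,
               (centerX + halfWidth)  / imgWidth,
               (centerY + halfHeight) / imgHeight]
    if clip {
        box = box.map { min(max($0, 0), 1) }   // clamp to the image, as the clip flag does
    }
    return box
}
```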
......@@ -17,15 +17,15 @@ import Foundation
struct TensorDesc {
let dims: [Int]
let dataType: VarTypeType
let dataLayout: DataLayout = .NCHW
let dataLayout: DataLayout = DataLayout.NHWC()
var NCHWDim: [Int] {
get {
if dims.count != 4 {
return dims
}
if dataLayout == .NCHW {
if dataLayout == DataLayout.NCHW() {
return dims
} else if dataLayout == .NHWC{
} else if dataLayout == DataLayout.NHWC() {
var resultDims = dims
resultDims.swapAt(1, 3)
return resultDims
......@@ -40,9 +40,9 @@ struct TensorDesc {
if dims.count != 4 {
return dims
}
if dataLayout == .NHWC {
if dataLayout == DataLayout.NHWC() {
return dims
} else if dataLayout == .NCHW{
} else if dataLayout == DataLayout.NCHW() {
var resultDims = dims
resultDims.swapAt(1, 3)
return resultDims
......
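Editor's note: NCHWDim and NHWCDim convert between layouts with a single swapAt(1, 3). A tiny worked example (values are illustrative); note that swapping only those two indices matches a true NHWC ↔ NCHW permutation only when H == W, as with the square inputs typically fed to these models.

```swift
// Worked example of the swapAt(1, 3) conversion used by NCHWDim / NHWCDim.
var dims = [1, 224, 224, 3]   // stored as N, H, W, C
dims.swapAt(1, 3)             // -> [1, 3, 224, 224]
print(dims)
```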
......@@ -61,7 +61,7 @@ class Tensor<P: PrecisionType>: Tensorial {
}
}
required init(inDim: Dim, inLayout: DataLayout = .NCHW) {
required init(inDim: Dim, inLayout: DataLayout = DataLayout.NCHW()) {
dim = inDim
let size = inDim.numel() * MemoryLayout<P>.size
let pointer = UnsafeMutablePointer<P>.allocate(capacity: size)
......@@ -78,13 +78,13 @@ class Tensor<P: PrecisionType>: Tensorial {
return
}
guard layout == .NCHW && to == .NHWC else {
guard layout == DataLayout.NCHW() && to == DataLayout.NHWC() else {
// other not support
return
}
let newPointer = UnsafeMutablePointer<P>.allocate(capacity: data.size)
if layout == .NCHW {
if layout == DataLayout.NCHW() {
NCHW2NHWC(newPtr: newPointer)
}
......@@ -106,7 +106,6 @@ class Tensor<P: PrecisionType>: Tensorial {
fatalError(" not support yet ")
}
let precisionSize: Int
switch precision {
case .Float32:
......@@ -116,7 +115,7 @@ class Tensor<P: PrecisionType>: Tensorial {
}
if dim.cout() == 4 {
if layout == .NHWC {
if layout == DataLayout.NHWC() {
let C = dim[3]
let cSlices = (C + 3) / 4
let paddedC = cSlices * 4
......@@ -232,7 +231,6 @@ class Tensor<P: PrecisionType>: Tensorial {
}
}
extension Tensor {
var debugDescription: String {
......
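Editor's note: when a 4-D NHWC tensor is packed into a Metal buffer, the channel dimension is rounded up to a multiple of 4 so each group of channels fills one RGBA slice. A short sketch of that padding arithmetic (values are illustrative):

```swift
// Channel padding used when laying out a 4-D NHWC tensor for Metal.
let C = 3
let cSlices = (C + 3) / 4     // 1 -- number of RGBA slices needed
let paddedC = cSlices * 4     // 4 -- channels after padding
print(cSlices, paddedC)
```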
......@@ -40,59 +40,34 @@ extension InputTexture {
public class Texture<P: PrecisionType>: Tensorial {
var dim: Dim
let textureDesc: MTLTextureDescriptor
var metalTexture: MTLTexture
private(set) var originDim: Dim
private var textureDesc: MTLTextureDescriptor!
var metalTexture: MTLTexture!
var transpose: [Int] = [0, 1, 2, 3]
init(device: MTLDevice, inDim: Dim, inLayout: DataLayout = .NHWC) {
dim = inDim
layout = inLayout
func initTexture(device: MTLDevice, transpose: [Int]) {
let newDim = transpose.map { originDim[$0] }
let newLayout = transpose.map {layout.layoutWithDim[$0] }
layout = DataLayout.init(newLayout)
dim = Dim.init(inDim: newDim)
let tmpTextureDes = MTLTextureDescriptor.init()
if inDim.cout() == 1 {
tmpTextureDes.width = inDim[0]
tmpTextureDes.textureType = .type1D
} else if inDim.cout() == 4 {
tmpTextureDes.height = inDim[1]
tmpTextureDes.width = inDim[2]
tmpTextureDes.depth = 1
tmpTextureDes.arrayLength = (inDim[3] * inDim[0] + 3)/4
tmpTextureDes.textureType = .type2DArray
} else if inDim.cout() == 2 {
// tmpTextureDes.height = 1
// tmpTextureDes.width = 1
// tmpTextureDes.depth = 1
// tmpTextureDes.arrayLength = (inDim[0] * inDim[1] + 3)/4
tmpTextureDes.width = inDim[0]
tmpTextureDes.height = inDim[1]
tmpTextureDes.depth = 1
tmpTextureDes.arrayLength = 1
tmpTextureDes.textureType = .type2DArray
} else {
/*
var name: box_coder_0.tmp_0
in var tensor desc dims size: 3
var tensor desc dim 0 value: -1
var tensor desc dim 1 value: 1917
var tensor desc dim 2 value: 4
*/
tmpTextureDes.height = inDim[1]
tmpTextureDes.width = inDim[2]
tmpTextureDes.depth = 1
tmpTextureDes.arrayLength = 1
tmpTextureDes.textureType = .type2DArray
}
tmpTextureDes.width = layout.W ?? 1
tmpTextureDes.height = layout.H ?? 1
tmpTextureDes.depth = 1
tmpTextureDes.arrayLength = ((layout.N ?? 1) * (layout.C ?? 1) + 3) / 4
tmpTextureDes.textureType = .type2DArray
if MemoryLayout<P>.size == 1 {
tmpTextureDes.pixelFormat = .rgba8Unorm
} else if MemoryLayout<P>.size == 2 {
tmpTextureDes.pixelFormat = .rgba16Float
} else if MemoryLayout<P>.size == 4 {
// tmpTextureDes.pixelFormat = .r32Float
tmpTextureDes.pixelFormat = .rgba32Float
}
// tmpTextureDes.pixelFormat = .rgba16Float
tmpTextureDes.usage = [.shaderRead, .shaderWrite]
tmpTextureDes.storageMode = .shared
......@@ -100,6 +75,26 @@ public class Texture<P: PrecisionType>: Tensorial {
metalTexture = device.makeTexture(descriptor: tmpTextureDes) ?! " texture nil "
}
init(device: MTLDevice, inDim: Dim) {
var fourDim: Dim
if inDim.cout() == 4 {
fourDim = inDim
} else if inDim.cout() < 4 {
var fourDimNum: [Int] = []
for _ in 0..<(4 - inDim.cout()) {
fourDimNum.append(1)
}
fourDimNum.append(contentsOf: inDim.dims)
fourDim = Dim.init(inDim: fourDimNum)
} else {
fatalError(" not support ")
}
dim = fourDim
originDim = fourDim
layout = DataLayout.init([(.N, fourDim[0]), (.C, fourDim[1]), (.H, fourDim[2]), (.W, fourDim[3])])
}
// required public init(inDim: Dim, inLayout: DataLayout = .NHWC, inTexture: MTLTexture) {
// dim = inDim
// layout = inLayout
......
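Editor's note: with this commit each operator calls initTexture(device:transpose:) on its output, and the texture descriptor is derived uniformly from the transposed layout instead of a per-rank switch. A hedged walk-through of the resulting descriptor for a typical [1, 3, 224, 224] tensor transposed to NHWC (numbers are illustrative):

```swift
// Hypothetical trace of initTexture(device:transpose: [0, 2, 3, 1]) for an NCHW [1, 3, 224, 224] dim.
let originDim = [1, 3, 224, 224]                 // N, C, H, W
let transpose = [0, 2, 3, 1]
let newDim = transpose.map { originDim[$0] }     // [1, 224, 224, 3] -> NHWC

let (N, C, H, W) = (1, 3, 224, 224)              // extents are still found by axis tag after the transpose
let width = W                                    // 224
let height = H                                   // 224
let arrayLength = (N * C + 3) / 4                // 1 -- one rgba32Float slice per 4 channels
print(newDim, width, height, arrayLength)
```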