Commit 6ec031ff authored by liuruilong

add dilation

Parent ee6ef4d9
......@@ -26,7 +26,7 @@ let modelHelperMap: [SupportModel : Net] = [.mobilenet_ssd : MobileNet_ssd_hand.
enum SupportModel: String{
// case mobilenet = "mobilenet"
case mobilenet_ssd = "mobilenetssd"
case genet = "enet"
case genet = "genet"
static func supportedModels() -> [SupportModel] {
//.mobilenet,
return [.mobilenet_ssd ,.genet]
......@@ -79,7 +79,7 @@ class ViewController: UIViewController {
return
}
do {
let max = 1
let max = 10
let startDate = Date.init()
for i in 0..<max {
try net.predict(inTexture: inTexture) { [weak self] (result) in
......@@ -87,6 +87,7 @@ class ViewController: UIViewController {
fatalError()
}
print(result.resultArray)
if i == max - 1 {
let time = Date.init().timeIntervalSince(startDate)
DispatchQueue.main.async {
......
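The timing change above only prints the total elapsed time once the last of the 10 predictions has been issued. A minimal, self-contained sketch of the same idea as a reusable helper (the helper name is illustrative and not part of the repo); predict completes asynchronously via a completion handler, so wall-clock timing around the loop is only an approximation:

import Foundation

// Hypothetical helper: runs `body` `runs` times and returns the average
// wall-clock latency in seconds, mirroring the `let max = 10` loop above.
func averageLatency(runs: Int, _ body: () -> Void) -> TimeInterval {
    let start = Date()
    for _ in 0..<runs {
        body()
    }
    return Date().timeIntervalSince(start) / Double(runs)
}

// Usage sketch: let avg = averageLatency(runs: 10) { /* try? net.predict(...) */ }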
......@@ -699,6 +699,7 @@
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
MACH_O_TYPE = mh_dylib;
MTL_LANGUAGE_REVISION = UseDeploymentTarget;
PRODUCT_BUNDLE_IDENTIFIER = "orange.paddle-mobile";
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
......@@ -727,6 +728,7 @@
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
MACH_O_TYPE = mh_dylib;
MTL_LANGUAGE_REVISION = UseDeploymentTarget;
PRODUCT_BUNDLE_IDENTIFIER = "orange.paddle-mobile";
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
......
......@@ -342,7 +342,7 @@ public extension MTLTexture {
// n c h w - dim
func toTensor(dim: (n: Int, c: Int, h: Int, w: Int)) -> [Float32] {
print("origin dim: \(dim)")
// print("origin dim: \(dim)")
print("texture: ")
print(self)
......
......@@ -314,7 +314,7 @@ public class PaddleMobileUnitTest {
let offsetX = filterSize.width/2 - paddings.0
let offsetY = filterSize.height/2 - paddings.1
let metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: 0, strideX: UInt16(stride.0), strideY: UInt16(stride.1), paddedZ: UInt16(paddings.0))
let metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: 0, strideX: UInt16(stride.0), strideY: UInt16(stride.1), paddedZ: UInt16(paddings.0), dilationX: UInt16(1), dilationY: UInt16(1))
let param = ConvAddBatchNormReluTestParam.init(inInputTexture: inputeTexture, inOutputTexture: outputTexture, inMetalParam: metalParam, inFilterBuffer: filterBuffer, inBiaseBuffer: biaseBuffer, inNewScaleBuffer: newScalueBuffer, inNewBiaseBuffer: newBiaseBuffer, inFilterSize: filterSize)
......
......@@ -14,7 +14,7 @@
import Foundation
let testTo = 12
let testTo = 54
public class ResultHolder<P: PrecisionType> {
public let dim: [Int]
......@@ -62,7 +62,7 @@ public class Executor<P: PrecisionType> {
queue = inQueue
for block in inProgram.programDesc.blocks {
//block.ops.count
for i in 0..<testTo {
for i in 0..<block.ops.count {
let op = block.ops[i]
do {
let op = try OpCreator<P>.shared.creat(device: inDevice, opDesc: op, scope: inProgram.scope)
......@@ -124,13 +124,13 @@ public class Executor<P: PrecisionType> {
// print(stridableInput)
// let _: Flo? = input.logDesc(header: "input: ", stridable: true)
for i in 0..<self.ops.count {
let op = self.ops[i]
print(" 第 \(i) 个 op: ")
op.delogOutput()
}
return
// for i in 0..<self.ops.count {
// let op = self.ops[i]
// print(" 第 \(i) 个 op: ")
// op.delogOutput()
// }
//
// return
let afterDate = Date.init()
......@@ -145,7 +145,6 @@ public class Executor<P: PrecisionType> {
return p
}), inElapsedTime: afterDate.timeIntervalSince(beforeDate))
}
completionHandle(resultHolder)
}
......
......@@ -16,168 +16,170 @@ import Foundation
import SwiftProtobuf
public class Loader<P: PrecisionType> {
class ParaLoader {
let file: UnsafeMutablePointer<FILE>
let fileSize: Int
var nowIndex: Int
init(paramPath: String) throws {
guard let tmpFile = fopen(paramPath, "rb") else {
throw PaddleMobileError.loaderError(message: "open param file error" + paramPath)
}
file = tmpFile
fseek(file, 0, SEEK_END)
fileSize = ftell(file)
guard fileSize > 0 else {
throw PaddleMobileError.loaderError(message: "param file size is too small")
}
rewind(file)
nowIndex = 0
}
func read(tensor: Tensor<P>) throws {
guard nowIndex <= fileSize else {
throw PaddleMobileError.loaderError(message: "out of the file range")
}
func pointerReader<T>(type: T.Type) -> T {
let ptr = UnsafeMutablePointer<T>.allocate(capacity: MemoryLayout<T>.size)
fread(ptr, 1, MemoryLayout<T>.size, file)
nowIndex += MemoryLayout<T>.size
let pointee = ptr.pointee
ptr.deinitialize(count: MemoryLayout<UInt32>.size)
ptr.deallocate()
return pointee
}
let _ = pointerReader(type: UInt32.self)
let lodLevel = pointerReader(type: UInt64.self)
for _ in 0..<lodLevel {
let size = pointerReader(type: UInt64.self)
for _ in 0..<Int(size/UInt64(MemoryLayout<size_t>.size)){
_ = pointerReader(type: size_t.self)
}
}
let _ = pointerReader(type: UInt32.self)
let tensorDescSize = pointerReader(type: Int32.self)
fseek(file, Int(tensorDescSize), SEEK_CUR)
nowIndex += Int(tensorDescSize)
/*
The data type recorded in the file is not checked here; the precision is fixed by the generic parameter supplied from outside.
*/
// The model currently stores Float values; this should really be determined from the model itself.
// let tmpCapacity = MemoryLayout<Float>.size * tensor.numel()
// let tmpPointer = UnsafeMutablePointer<Float>.allocate(capacity: tmpCapacity);
let bytesRead = fread(tensor.data.pointer, 1, tensor.data.size, file)
guard bytesRead == tensor.data.size else {
throw PaddleMobileError.loaderError(message: "param read size error")
}
// TODO: use script to convert
// let bytesRead = fread(tmpPointer, 1, tmpCapacity, file)
// for i in 0..<tensor.numel() {
// tensor.data[i] = P.init(inFloat: tmpPointer[i])
// }
// tmpPointer.deinitialize(count: tmpCapacity)
// tmpPointer.deallocate()
nowIndex += bytesRead
}
deinit {
fclose(file)
}
class ParaLoader {
let file: UnsafeMutablePointer<FILE>
let fileSize: Int
var nowIndex: Int
init(paramPath: String) throws {
guard let tmpFile = fopen(paramPath, "rb") else {
throw PaddleMobileError.loaderError(message: "open param file error" + paramPath)
}
file = tmpFile
fseek(file, 0, SEEK_END)
fileSize = ftell(file)
guard fileSize > 0 else {
throw PaddleMobileError.loaderError(message: "param file size is too small")
}
rewind(file)
nowIndex = 0
}
public init(){}
public func load(device: MTLDevice, modelPath: String, paraPath: String) throws -> Program{
guard let modelData = try? Data.init(contentsOf: URL.init(fileURLWithPath: modelPath)) else {
throw PaddleMobileError.loaderError(message: "load " + modelPath + " failed !")
func read(tensor: Tensor<P>) throws {
guard nowIndex <= fileSize else {
throw PaddleMobileError.loaderError(message: "out of the file range")
}
func pointerReader<T>(type: T.Type) -> T {
let ptr = UnsafeMutablePointer<T>.allocate(capacity: MemoryLayout<T>.size)
fread(ptr, 1, MemoryLayout<T>.size, file)
nowIndex += MemoryLayout<T>.size
let pointee = ptr.pointee
ptr.deinitialize(count: MemoryLayout<UInt32>.size)
ptr.deallocate()
return pointee
}
let _ = pointerReader(type: UInt32.self)
let lodLevel = pointerReader(type: UInt64.self)
for _ in 0..<lodLevel {
let size = pointerReader(type: UInt64.self)
for _ in 0..<Int(size/UInt64(MemoryLayout<size_t>.size)){
_ = pointerReader(type: size_t.self)
}
do {
let protoProgram = try PaddleMobile_Framework_Proto_ProgramDesc.init(
serializedData: modelData)
let originProgramDesc = ProgramDesc.init(protoProgram: protoProgram)
let programDesc = ProgramOptimize<P>.init().optimize(originProgramDesc: originProgramDesc)
print(programDesc)
guard let paraLoader = try? ParaLoader.init(paramPath: paraPath) else {
throw PaddleMobileError.loaderError(message: "load para error")
}
guard programDesc.blocks.count > 0 else {
throw PaddleMobileError.loaderError(message: "count of blocks must greater than 0")
}
// to get feed key and fetch key
let block = programDesc.blocks[0]
guard let firstOp = block.ops.first, let lastOp = block.ops.last else {
throw PaddleMobileError.loaderError(message: "at least two operator")
}
guard firstOp.type == gFeedType, lastOp.type == gFetchType else {
throw PaddleMobileError.loaderError(message: "the first op is not feed or the last op is not fetch")
}
let _ = pointerReader(type: UInt32.self)
let tensorDescSize = pointerReader(type: Int32.self)
fseek(file, Int(tensorDescSize), SEEK_CUR)
nowIndex += Int(tensorDescSize)
/*
The data type recorded in the file is not checked here; the precision is fixed by the generic parameter supplied from outside.
*/
// The model currently stores Float values; this should really be determined from the model itself.
// let tmpCapacity = MemoryLayout<Float>.size * tensor.numel()
// let tmpPointer = UnsafeMutablePointer<Float>.allocate(capacity: tmpCapacity);
let bytesRead = fread(tensor.data.pointer, 1, tensor.data.size, file)
guard bytesRead == tensor.data.size else {
throw PaddleMobileError.loaderError(message: "param read size error")
}
// TODO: use script to convert
// let bytesRead = fread(tmpPointer, 1, tmpCapacity, file)
// for i in 0..<tensor.numel() {
// tensor.data[i] = P.init(inFloat: tmpPointer[i])
// }
// tmpPointer.deinitialize(count: tmpCapacity)
// tmpPointer.deallocate()
nowIndex += bytesRead
}
deinit {
fclose(file)
}
}
public init(){}
public func load(device: MTLDevice, modelPath: String, paraPath: String) throws -> Program{
guard let modelData = try? Data.init(contentsOf: URL.init(fileURLWithPath: modelPath)) else {
throw PaddleMobileError.loaderError(message: "load " + modelPath + " failed !")
}
do {
let protoProgram = try PaddleMobile_Framework_Proto_ProgramDesc.init(
serializedData: modelData)
let originProgramDesc = ProgramDesc.init(protoProgram: protoProgram)
let programDesc = ProgramOptimize<P>.init().optimize(originProgramDesc: originProgramDesc)
print(programDesc)
guard let paraLoader = try? ParaLoader.init(paramPath: paraPath) else {
throw PaddleMobileError.loaderError(message: "load para error")
}
guard programDesc.blocks.count > 0 else {
throw PaddleMobileError.loaderError(message: "count of blocks must greater than 0")
}
// to get feed key and fetch key
let block = programDesc.blocks[0]
guard let firstOp = block.ops.first, let lastOp = block.ops.last else {
throw PaddleMobileError.loaderError(message: "at least two operator")
}
guard firstOp.type == gFeedType, lastOp.type == gFetchType else {
throw PaddleMobileError.loaderError(message: "the first op is not feed or the last op is not fetch")
}
guard let inputKey = opInfos[gFeedType]?.inputs.first, let outKey = opInfos[gFetchType]?.outputs.first else {
throw PaddleMobileError.loaderError(message: "the feed input key or fetch output key not found")
}
guard let feedKey = firstOp.inputs[inputKey]?.first, let fetchKey = lastOp.outputs[outKey]?.first else {
throw PaddleMobileError.loaderError(message: "feed key or fetch key not found")
}
let scope = Scope.init(inFeedKey: feedKey, inFetchKey: fetchKey)
// to load memory
for block in programDesc.blocks {
for varDesc in block.vars {
if (varDesc.type == .LodTensor) {
guard let tensorDesc = varDesc.tensorDesc else {
throw PaddleMobileError.loaderError(message: "get tensor desc failed")
}
guard let inputKey = opInfos[gFeedType]?.inputs.first, let outKey = opInfos[gFetchType]?.outputs.first else {
throw PaddleMobileError.loaderError(message: "the feed input key or fetch output key not found")
}
guard let feedKey = firstOp.inputs[inputKey]?.first, let fetchKey = lastOp.outputs[outKey]?.first else {
throw PaddleMobileError.loaderError(message: "feed key or fetch key not found")
if (varDesc.persistable
&& varDesc.type != .FeedMiniBatch
&& varDesc.type != .FetchList) {
let dimArr = tensorDesc.dims
guard dimArr.count > 0 else {
throw PaddleMobileError.loaderError(message: "tensor desc dim size error")
}
let dim = Dim.init(inDim: dimArr)
let tensor = Tensor<P>.init(inDim: dim, inLayout: tensorDesc.dataLayout)
do {
try paraLoader.read(tensor: tensor)
} catch let error {
throw error
}
tensor.convert(to: DataLayout.NHWC())
// tensor.initBuffer(device: device)
scope[varDesc.name] = tensor
} else {
let dim = Dim.init(inDim: tensorDesc.dims)
scope[varDesc.name] = Texture<P>.init(device: device, inDim: dim)
}
let scope = Scope.init(inFeedKey: feedKey, inFetchKey: fetchKey)
// to load memory
for block in programDesc.blocks {
for varDesc in block.vars {
if (varDesc.type == .LodTensor) {
guard let tensorDesc = varDesc.tensorDesc else {
throw PaddleMobileError.loaderError(message: "get tensor desc failed")
}
if (varDesc.persistable
&& varDesc.type != .FeedMiniBatch
&& varDesc.type != .FetchList) {
let dimArr = tensorDesc.dims
guard dimArr.count > 0 else {
throw PaddleMobileError.loaderError(message: "tensor desc dim size error")
}
let dim = Dim.init(inDim: dimArr)
let tensor = Tensor<P>.init(inDim: dim, inLayout: tensorDesc.dataLayout)
do {
try paraLoader.read(tensor: tensor)
} catch let error {
throw error
}
tensor.convert(to: DataLayout.NHWC())
// tensor.initBuffer(device: device)
scope[varDesc.name] = tensor
} else {
let dim = Dim.init(inDim: tensorDesc.dims)
scope[varDesc.name] = Texture<P>.init(device: device, inDim: dim)
}
} else {
if varDesc.name == fetchKey {
scope[varDesc.name] = ResultHolder<P>.init(inDim: [], inResult: [], inElapsedTime: 0.0)
} else if varDesc.name == feedKey {
}
}
}
} else {
if varDesc.name == fetchKey {
scope[varDesc.name] = ResultHolder<P>.init(inDim: [], inResult: [], inElapsedTime: 0.0)
} else if varDesc.name == feedKey {
}
let program = Program.init(inProgramDesc: programDesc, inParamPath: paraPath, inScope: scope)
return program
} catch _ {
throw PaddleMobileError.loaderError(message: "protobuf decoder error")
}
}
}
let program = Program.init(inProgramDesc: programDesc, inParamPath: paraPath, inScope: scope)
return program
} catch _ {
throw PaddleMobileError.loaderError(message: "protobuf decoder error")
}
}
}
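For reference, the per-tensor record layout that ParaLoader.read walks through above can be summarized as follows. This is a reading of the code in this diff, not a separate specification, and the struct below is purely illustrative:

// Illustrative only: the fields ParaLoader.read consumes for each tensor, in order.
struct ParamRecordLayout {
    var version: UInt32            // read and discarded
    var lodLevel: UInt64           // for each level: a UInt64 byte size, then size_t entries
    var tensorDescVersion: UInt32  // read and discarded
    var tensorDescSize: Int32      // the serialized tensor desc is skipped with fseek
    // ...followed by tensor.data.size raw bytes, whose precision comes from the
    // generic parameter P rather than from the data type recorded in the desc.
}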
......@@ -97,6 +97,13 @@ class ConvAddOp<P: PrecisionType>: Operator<ConvAddKernel<P>, ConvAddParam<P>>,
}
func delogOutput() {
print("stride: ")
print(para.stride)
print("dilations: ")
print(para.dilations)
print(" \(type) output: ")
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
}
......
......@@ -43,8 +43,15 @@ class ConvTransposeOp<P: PrecisionType>: Operator<ConvTransposeKernel<P>, ConvTr
}
func delogOutput() {
print("conv transpose delog")
let _: P? = para.input.metalTexture.logDesc(header: "conv transpose input: ", stridable: true)
let _: P? = para.output.metalTexture.logDesc(header: "conv transpose output: ", stridable: true)
print(" \(type) output: ")
let originDim = para.output.originDim
if para.output.transpose == [0, 1, 2, 3] {
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
} else if para.output.transpose == [0, 2, 3, 1] {
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
} else {
print(" not implement")
}
}
}
......@@ -61,11 +61,18 @@ class ElementwiseAddOp<P: PrecisionType>: Operator<ElementwiseAddKernel<P>, Elem
print(para.inputX.metalTexture.toTensor(dim: (n: para.inputX.tensorDim[0], c: para.inputX.tensorDim[1], h: para.inputX.tensorDim[2], w: para.inputX.tensorDim[3])).strideArray())
print(" \(type) inputY: ")
print(para.inputY.metalTexture.toTensor(dim: (n: para.inputY.tensorDim[0], c: para.inputY.tensorDim[1], h: para.inputY.tensorDim[2], w: para.inputY.tensorDim[3])).strideArray())
print(" \(type) output: ")
let originDim = para.output.originDim
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
if para.output.transpose == [0, 1, 2, 3] {
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
} else if para.output.transpose == [0, 2, 3, 1] {
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
} else {
print(" not implement")
}
}
func runImpl(device: MTLDevice, buffer: MTLCommandBuffer) throws {
......
......@@ -75,7 +75,7 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
print("offset y: \(offsetY)")
let offsetZ = 0.0
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]))
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]), dilationX: UInt16(param.dilations[0]), dilationY: UInt16(param.dilations[1]))
var invs: [P] = []
let varianceContents = param.variance.buffer.contents().assumingMemoryBound(to: P.self)
......
......@@ -27,9 +27,10 @@ class ConvAddKernel<P: PrecisionType>: Kernel, Computable {
param.output.initTexture(device: device, inTranspose: [0, 2, 3, 1])
let offsetX = param.filter.width/2 - Int(param.paddings[0])
let offsetY = param.filter.height/2 - Int(param.paddings[1])
let offsetX = (Int(param.dilations[0]) * (param.filter.width - 1) + 1)/2 - Int(param.paddings[0])
let offsetY = (Int(param.dilations[1]) * (param.filter.height - 1) + 1)/2 - Int(param.paddings[1])
param.filter.initBuffer(device: device, precision: Tensor.BufferPrecision.Float32)
param.y.initBuffer(device: device, precision: Tensor.BufferPrecision.Float32)
......@@ -37,7 +38,11 @@ class ConvAddKernel<P: PrecisionType>: Kernel, Computable {
print("offset y: \(offsetY)")
let offsetZ = 0.0
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]))
let inMetalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]), dilationX: UInt16(param.dilations[0]), dilationY: UInt16(param.dilations[1]))
print("metal param: ")
print(inMetalParam)
metalParam = inMetalParam
}
func compute(commandBuffer: MTLCommandBuffer, param: ConvAddParam<P>) throws {
......
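The new offset computation in ConvAddKernel accounts for the dilated kernel's larger receptive field: with dilation d, a k-tap filter spans d * (k - 1) + 1 input pixels. A minimal sketch of that arithmetic (the function name is illustrative):

// Effective extent of a dilated filter and the resulting centre offset, matching
// `(Int(param.dilations[0]) * (param.filter.width - 1) + 1)/2 - Int(param.paddings[0])` above.
func convOffset(kernel: Int, dilation: Int, padding: Int) -> Int {
    let effectiveKernel = dilation * (kernel - 1) + 1
    return effectiveKernel / 2 - padding
}

// Example: a 3x3 filter with dilation 2 needs padding 2 to stay centred,
// since (2 * (3 - 1) + 1) / 2 - 2 == 0, just as dilation 1 with padding 1 gives 0.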
......@@ -81,7 +81,7 @@ class ConvBNReluKernel<P: PrecisionType>: Kernel, Computable, Testable {
let offsetZ = 0.0
print(" fuck ")
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]))
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]), dilationX: UInt16(param.dilations[0]), dilationY: UInt16(param.dilations[1]))
var invs: [P] = []
let varianceContents = param.variance.buffer.contents().assumingMemoryBound(to: P.self)
......
......@@ -21,6 +21,8 @@ public struct MetalConvParam {
let strideX: UInt16
let strideY: UInt16
let paddedZ: UInt16
let dilationX: UInt16
let dilationY: UInt16
}
class ConvKernel<P: PrecisionType>: Kernel, Computable {
......@@ -39,7 +41,7 @@ class ConvKernel<P: PrecisionType>: Kernel, Computable {
let offsetZ = 0.0
param.filter.initBuffer(device: device, precision: Tensor.BufferPrecision.Float32)
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]))
metalParam = MetalConvParam.init(offsetX: Int16(offsetX), offsetY: Int16(offsetY), offsetZ: Int16(offsetZ), strideX: UInt16(param.stride[0]), strideY: UInt16(param.stride[1]), paddedZ: UInt16(param.input.metalTexture.arrayLength * 4 - param.input.dim[3]), dilationX: UInt16(param.dilations[0]), dilationY: UInt16(param.dilations[1]))
}
func compute(commandBuffer: MTLCommandBuffer, param: ConvParam<P>) throws {
......
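The .metal shader that consumes MetalConvParam is not part of this diff; the sketch below only restates, in Swift, the standard dilated-convolution sampling rule that the new dilationX/dilationY fields are presumably forwarded for (names are illustrative, not taken from the shader source):

// inputX = outputX * strideX - paddingX + kx * dilationX, for kx in 0..<kernelW.
// With the offsets computed in ConvAddKernel this is equivalent to
// outputX * strideX + offsetX + (kx - kernelW / 2) * dilationX.
func dilatedSampleX(outputX: Int, kx: Int, strideX: Int, paddingX: Int, dilationX: Int) -> Int {
    return outputX * strideX - paddingX + kx * dilationX
}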
......@@ -45,6 +45,7 @@ class ConvTransposeKernel<P: PrecisionType>: Kernel, Computable{
metalParam = MetalConvTransposeParam.init(kernelW: kernelWidth, kernelH: kernelHeight, strideX: strideX, strideY: strideY, paddingX: paddingX, paddingY: paddingY, dilationX: dilationX, dilationY: dilationY)
param.output.initTexture(device: device, inTranspose: param.input.transpose)
param.filter.initBuffer(device: device)
}
func compute(commandBuffer: MTLCommandBuffer, param: ConvTransposeParam<P>) throws {
......
......@@ -55,7 +55,7 @@ class ElementwiseAddKernel<P: PrecisionType>: Kernel, Computable {
}
emp.yoff = 4 - Int32(param.inputY.tensorDim.cout())
if (param.inputX.dim == param.inputY.dim) && (param.inputX.transpose == param.inputY.transpose) {
print("===> elementwise_add fast!!!")
// print("===> elementwise_add fast!!!")
emp.fast = 1
}
......
......@@ -48,8 +48,10 @@ extension BlockDesc: CustomStringConvertible, CustomDebugStringConvertible {
var description: String {
var str = ""
for op in ops {
str += op.description
for i in 0..<ops.count {
str += " op \(i): "
let op = ops[i]
str += op.description
}
for varDesc in vars {
......
......@@ -16,245 +16,246 @@ import Accelerate
import Foundation
protocol Tensorial: CustomStringConvertible, CustomDebugStringConvertible{
var dim: Dim { get set }
func numel() -> Int
var layout: DataLayout { get }
var dim: Dim { get set }
func numel() -> Int
var layout: DataLayout { get }
}
extension Tensorial {
func numel() -> Int {
return dim.numel()
}
func numel() -> Int {
return dim.numel()
}
}
class Tensor<P: PrecisionType>: Tensorial {
enum BufferPrecision {
case Float32, Float16
enum BufferPrecision {
case Float32, Float16
}
var data: Data
var dim: Dim
var buffer: MTLBuffer!
private(set) var layout: DataLayout
class Data {
init(inSize: Int, inPointer: UnsafeMutablePointer<P>) {
size = inSize
pointer = inPointer
}
var data: Data
var dim: Dim
var buffer: MTLBuffer!
private(set) var layout: DataLayout
class Data {
init(inSize: Int, inPointer: UnsafeMutablePointer<P>) {
size = inSize
pointer = inPointer
}
let size: Int
var pointer: UnsafeMutablePointer<P>
subscript(index: Int) -> P{
get {
return pointer[index]
}
set {
pointer[index] = newValue
}
}
func release() {
pointer.deinitialize(count: size)
pointer.deallocate()
}
deinit {
// release()
}
let size: Int
var pointer: UnsafeMutablePointer<P>
subscript(index: Int) -> P{
get {
return pointer[index]
}
set {
pointer[index] = newValue
}
}
required init(inDim: Dim, inLayout: DataLayout = DataLayout.NCHW()) {
dim = inDim
let size = inDim.numel() * MemoryLayout<P>.size
let pointer = UnsafeMutablePointer<P>.allocate(capacity: size)
data = Data.init(inSize: size, inPointer: pointer)
layout = inLayout
func release() {
pointer.deinitialize(count: size)
pointer.deallocate()
}
deinit {
// release()
}
}
required init(inDim: Dim, inLayout: DataLayout = DataLayout.NCHW()) {
dim = inDim
let size = inDim.numel() * MemoryLayout<P>.size
let pointer = UnsafeMutablePointer<P>.allocate(capacity: size)
data = Data.init(inSize: size, inPointer: pointer)
layout = inLayout
}
func convert(to: DataLayout) {
guard to != layout else {
return
}
func convert(to: DataLayout) {
guard to != layout else {
return
}
guard dim.cout() == 4 else {
return
}
guard layout == DataLayout.NCHW() && to == DataLayout.NHWC() else {
// other not support
return
}
let newPointer = UnsafeMutablePointer<P>.allocate(capacity: data.size)
if layout == DataLayout.NCHW() {
NCHW2NHWC(newPtr: newPointer)
}
data.release()
data.pointer = newPointer
layout = to
guard dim.cout() == 4 else {
return
}
func float32ToFloat16(input: UnsafeMutablePointer<Float32>, output: UnsafeMutableRawPointer, count: Int) {
var float32Buffer = vImage_Buffer(data: input, height: 1, width: UInt(count), rowBytes: count * 4)
var float16buffer = vImage_Buffer(data: output, height: 1, width: UInt(count), rowBytes: count * 2)
guard vImageConvert_PlanarFtoPlanar16F(&float32Buffer, &float16buffer, 0) == kvImageNoError else {
fatalError(" float 32 to float 16 error ! ")
}
guard layout == DataLayout.NCHW() && to == DataLayout.NHWC() else {
// other not support
return
}
let newPointer = UnsafeMutablePointer<P>.allocate(capacity: data.size)
func initBuffer(device: MTLDevice, precision: BufferPrecision = .Float32) {
guard let floatPointer = data.pointer as? UnsafeMutablePointer<Float32> else {
fatalError(" not support yet ")
}
let precisionSize: Int
switch precision {
case .Float32:
precisionSize = 4
case .Float16:
precisionSize = 2
}
if dim.cout() == 4 {
if layout == DataLayout.NHWC() {
let C = dim[3]
let cSlices = (C + 3) / 4
let paddedC = cSlices * 4
let count = paddedC * dim[0] * dim[1] * dim[2]
if C == paddedC {
buffer = device.makeBuffer(length: count * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: count * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: count)
}
} else if C == 1 {
buffer = device.makeBuffer(length: numel() * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: numel() * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: numel())
}
} else {
buffer = device.makeBuffer(length: count * precisionSize)
let convertedPointer = UnsafeMutablePointer<Float32>.allocate(capacity: count)
var tmpPointer = floatPointer
var dstPtr = convertedPointer
for _ in 0..<dim[0] * dim[1] * dim[2] {
for j in 0..<paddedC {
if j < C {
dstPtr[j] = tmpPointer[j]
}
}
tmpPointer += C
dstPtr += paddedC
}
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: convertedPointer, byteCount: count * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: convertedPointer, output: buffer.contents(), count: count)
}
convertedPointer.deinitialize(count: count)
convertedPointer.deallocate()
}
}
} else if dim.cout() == 1 {
buffer = device.makeBuffer(length: numel() * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: numel() * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: numel())
}
} else {
fatalError(" not support !")
}
//TODO: release
data.release()
if layout == DataLayout.NCHW() {
NCHW2NHWC(newPtr: newPointer)
}
var width: Int {
get {
if dim.cout() == 4 {
return dim[1]
} else {
fatalError()
}
}
data.release()
data.pointer = newPointer
layout = to
}
func float32ToFloat16(input: UnsafeMutablePointer<Float32>, output: UnsafeMutableRawPointer, count: Int) {
var float32Buffer = vImage_Buffer(data: input, height: 1, width: UInt(count), rowBytes: count * 4)
var float16buffer = vImage_Buffer(data: output, height: 1, width: UInt(count), rowBytes: count * 2)
guard vImageConvert_PlanarFtoPlanar16F(&float32Buffer, &float16buffer, 0) == kvImageNoError else {
fatalError(" float 32 to float 16 error ! ")
}
}
func initBuffer(device: MTLDevice, precision: BufferPrecision = .Float32) {
guard let floatPointer = data.pointer as? UnsafeMutablePointer<Float32> else {
fatalError(" not support yet ")
}
var height: Int {
get {
if dim.cout() == 4 {
return dim[2]
} else {
fatalError()
}
}
let precisionSize: Int
switch precision {
case .Float32:
precisionSize = 4
case .Float16:
precisionSize = 2
}
var channel: Int {
get {
if dim.cout() == 4 {
return dim[3]
} else {
fatalError()
if dim.cout() == 4 {
if layout == DataLayout.NHWC() {
let C = dim[3]
let cSlices = (C + 3) / 4
let paddedC = cSlices * 4
let count = paddedC * dim[0] * dim[1] * dim[2]
if C == paddedC {
buffer = device.makeBuffer(length: count * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: count * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: count)
}
} else if C == 1 {
buffer = device.makeBuffer(length: numel() * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: numel() * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: numel())
}
} else {
buffer = device.makeBuffer(length: count * precisionSize)
let convertedPointer = UnsafeMutablePointer<Float32>.allocate(capacity: count)
var tmpPointer = floatPointer
var dstPtr = convertedPointer
for _ in 0..<dim[0] * dim[1] * dim[2] {
for j in 0..<paddedC {
if j < C {
dstPtr[j] = tmpPointer[j]
}
}
tmpPointer += C
dstPtr += paddedC
}
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: convertedPointer, byteCount: count * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: convertedPointer, output: buffer.contents(), count: count)
}
convertedPointer.deinitialize(count: count)
convertedPointer.deallocate()
}
}
} else if dim.cout() == 1 {
let num = ((numel() + 3) / 4) * 4
buffer = device.makeBuffer(length: num * precisionSize)
switch precision {
case .Float32:
buffer?.contents().copyMemory(from: data.pointer, byteCount: num * MemoryLayout<P>.stride)
case .Float16:
float32ToFloat16(input: floatPointer, output: buffer.contents(), count: num)
}
} else {
fatalError(" not support !")
}
//TODO: release
data.release()
}
var width: Int {
get {
if dim.cout() == 4 {
return dim[1]
} else {
fatalError()
}
}
}
var height: Int {
get {
if dim.cout() == 4 {
return dim[2]
} else {
fatalError()
}
}
}
var channel: Int {
get {
if dim.cout() == 4 {
return dim[3]
} else {
fatalError()
}
}
}
func NCHW2NHWC(newPtr: UnsafeMutablePointer<P>) {
let N = dim[0]
let C = dim[1]
let H = dim[2]
let W = dim[3]
let HXW = H * W
let CXHXW = C * H * W
func NCHW2NHWC(newPtr: UnsafeMutablePointer<P>) {
let N = dim[0]
let C = dim[1]
let H = dim[2]
let W = dim[3]
let HXW = H * W
let CXHXW = C * H * W
var index: Int = 0
for n in 0..<N {
for h in 0..<H{
for w in 0..<W{
for c in 0..<C{
newPtr[index] = data.pointer[n * CXHXW + c * HXW + h * W + w]
index += 1
}
}
}
var index: Int = 0
for n in 0..<N {
for h in 0..<H{
for w in 0..<W{
for c in 0..<C{
newPtr[index] = data.pointer[n * CXHXW + c * HXW + h * W + w]
index += 1
}
}
dim.swapeDimAt(index1: 1, index2: 3)
}
}
dim.swapeDimAt(index1: 1, index2: 3)
}
}
extension Tensor {
var debugDescription: String {
var str = "dim: \(dim) \n"
str += "MTLBuffer: \(self.buffer) \n"
for i in 0..<buffer.length/MemoryLayout<P>.size {
str += " \(buffer.contents().assumingMemoryBound(to: P.self)[i])"
}
return str
var debugDescription: String {
var str = "dim: \(dim) \n"
str += "MTLBuffer: \(self.buffer) \n"
for i in 0..<buffer.length/MemoryLayout<P>.size {
str += " \(buffer.contents().assumingMemoryBound(to: P.self)[i])"
}
func logDataPointer(header: String = "") {
print(header)
var str = ""
str += "data size: \(data.size) \n"
str += "dim: \(dim) \n"
for i in 0..<numel() {
str += " \(data.pointer[i])"
}
print(str)
return str
}
func logDataPointer(header: String = "") {
print(header)
var str = ""
str += "data size: \(data.size) \n"
str += "dim: \(dim) \n"
for i in 0..<numel() {
str += " \(data.pointer[i])"
}
var description: String {
return debugDescription
}
print(str)
}
var description: String {
return debugDescription
}
}
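As a quick cross-check of the NCHW2NHWC loop above (this helper is illustrative and not part of the repo): element (n, c, h, w) of an N x C x H x W tensor lands at the following flat index in the NHWC copy, which is exactly what the n/h/w/c loop order produces:

// Destination index produced by the n/h/w/c loop order in NCHW2NHWC.
func nhwcIndex(n: Int, c: Int, h: Int, w: Int, C: Int, H: Int, W: Int) -> Int {
    return ((n * H + h) * W + w) * C + c
}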