Commit 0662c069 authored by liuruilong

run mobilenet

Parent 846bf2ce
@@ -109,7 +109,7 @@ class ViewController: UIViewController {
threadPickerView.delegate = self
threadPickerView.dataSource = self
selectImage = UIImage.init(named: "hand.jpg")
selectImage = UIImage.init(named: "banana.jpeg")
selectImageView.image = selectImage
net.getTexture(image: selectImage!.cgImage!) {[weak self] (texture) in
self?.toPredictTexture = texture
......
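The demo view controller switches its test input to banana.jpeg (the MobileNet sample image) and hands the CGImage to net.getTexture, which asynchronously produces the Metal texture stored in toPredictTexture for prediction. As a rough sketch of what that conversion step amounts to — not the paddle-mobile implementation; MTKTextureLoader and the option values here are assumptions — it looks roughly like this:

import MetalKit
import UIKit

// Illustrative only: turn a CGImage into an MTLTexture for shader reads.
// The real pipeline may also resize and normalize the image in a compute shader.
func makeInputTexture(from cgImage: CGImage, device: MTLDevice) throws -> MTLTexture {
    let loader = MTKTextureLoader(device: device)
    return try loader.newTexture(cgImage: cgImage, options: [
        MTKTextureLoader.Option.textureUsage: NSNumber(value: MTLTextureUsage.shaderRead.rawValue),
        MTKTextureLoader.Option.SRGB: NSNumber(value: false)
    ])
}

// Usage, mirroring the ViewController flow:
// let texture = try makeInputTexture(from: selectImage!.cgImage!,
//                                    device: MTLCreateSystemDefaultDevice()!)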
@@ -119,7 +119,7 @@ public class Executor<P: PrecisionType> {
// })
// print(inputArr.strideArray())
//
// writeToLibrary(fileName: "genet_input_hand", array: inputArr)
// writeToLibrary(fileName: "banana", array: inputArr)
// print("write to library done")
// return
// print(inputArr)
@@ -128,11 +128,13 @@ public class Executor<P: PrecisionType> {
// print(stridableInput)
// let _: Flo? = input.logDesc(header: "input: ", stridable: true)
// for i in 0..<self.ops.count {
// let op = self.ops[i]
// print(" 第 \(i) 个 op: ")
// op.delogOutput()
// }
for i in 0..<self.ops.count {
let op = self.ops[i]
print(" 第 \(i) 个 op: ")
op.delogOutput()
}
// return;
// self.ops[testTo - 2].delogOutput()
// self.ops[testTo - 1].delogOutput()
// self.ops[60].delogOutput()
......
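In the Executor, the per-op debug loop is switched on: after the graph runs, every op's delogOutput() is called in order so intermediate results can be inspected op by op. A minimal sketch of that pattern — the protocol and names below are illustrative, not the repo's exact types:

// Illustrative only: iterate ops in execution order and dump each one's output.
protocol DebuggableOp {
    var type: String { get }
    func delogOutput()
}

func delogAll(_ ops: [DebuggableOp]) {
    for (i, op) in ops.enumerated() {
        print("op \(i): \(op.type)")
        op.delogOutput()
    }
}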
@@ -34,7 +34,7 @@ class ConvAddBatchNormReluParam<P: PrecisionType>: OpParam {
scale = try ConvAddBatchNormReluParam.inputScale(inputs: opDesc.paraInputs, from: inScope)
mean = try ConvAddBatchNormReluParam.inputMean(inputs: opDesc.paraInputs, from: inScope)
y = try ConvAddBatchNormReluParam.inputY(inputs: opDesc.inputs, from: inScope)
y = try ConvAddBatchNormReluParam.inputY(inputs: opDesc.paraInputs, from: inScope)
} catch let error {
throw error
}
......
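The parameter fix changes where the element-wise-add operand Y is resolved: like scale and mean just above it, Y is read from opDesc.paraInputs rather than opDesc.inputs, i.e. from the fused op's parameter inputs. A generic sketch of that kind of keyed lookup — names and types are illustrative, not the OpParam API:

// Illustrative sketch: a fused op's parameter tensors (scale, mean, y, ...) are
// looked up by key in one map, regular inputs in another; resolving Y from the
// wrong map means its variable name is simply not found in the scope.
enum ParamError: Error { case variableNotFound(key: String) }

func resolveTensor(forKey key: String,
                   in inputMap: [String: [String]],
                   scope: [String: [Float32]]) throws -> [Float32] {
    guard let varName = inputMap[key]?.first, let tensor = scope[varName] else {
        throw ParamError.variableNotFound(key: key)
    }
    return tensor
}

// e.g. let y = try resolveTensor(forKey: "Y", in: paraInputs, scope: scope)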
@@ -93,34 +93,24 @@ class ConvAddOp<P: PrecisionType>: Operator<ConvAddKernel<P>, ConvAddParam<P>>,
}
func delogOutput() {
// print("op \(type): ")
// print(" padding: ")
// print(para.paddings)
// print("stride: ")
// print(para.stride)
// print("dilations: ")
// print(para.dilations)
// print(" para input dim: ")
// print(para.input.dim)
// print(" para filter dim: ")
// print(para.filter.dim)
// print(" para output dim: ")
// print(para.output.dim)
// print(" biase: ")
// let biase: [Float32] = para.y.buffer.array()
// print(biase)
print("op \(type): ")
print(" \(type) output: ")
print(" padding: ")
print(para.paddings)
print("stride: ")
print(para.stride)
print("dilations: ")
print(para.dilations)
print(" para input dim: ")
print(para.input.dim)
print(" para filter dim: ")
print(para.filter.dim)
print(" para output dim: ")
print(para.output.dim)
print(" biase: ")
let biase: [Float32] = para.y.buffer.array()
print(biase)
print(" - filter - ")
let array: [Float32] = para.filter.buffer.array()
print(array)
print(" - y - ")
let yArray: [Float32] = para.y.buffer.array()
print(yArray)
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
}
}
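ConvAddOp's delogOutput() now dumps the conv hyper-parameters (paddings, stride, dilations), the input/filter/output dims, the raw filter and bias buffers, and the output texture converted back to a tensor and printed via strideArray(). Dumps like these can be enormous, so typically only a strided sample is printed; a small sketch of that idea — strideArray() in the repo may behave differently, this is only an assumption of intent:

// Illustrative only: print a handful of evenly spaced samples from a large
// tensor buffer instead of every element.
extension Array where Element == Float32 {
    func stridedSample(_ sampleCount: Int = 10) -> [(index: Int, value: Float32)] {
        guard !isEmpty, sampleCount > 0 else { return [] }
        let step = Swift.max(1, self.count / sampleCount)
        return Swift.stride(from: 0, to: self.count, by: step).map { (index: $0, value: self[$0]) }
    }
}

// let filter: [Float32] = para.filter.buffer.array()   // as in delogOutput()
// print(filter.stridedSample())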
@@ -81,8 +81,6 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
fatalError()
}
let offsetX = param.filter.width/2 - Int(param.paddings[0])
let offsetY = param.filter.height/2 - Int(param.paddings[1])
@@ -121,10 +119,10 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
var newBiaseBuffer: MTLBuffer
var newScaleBuffer: MTLBuffer
if computePrecision == .Float16 {
if computePrecision == .Float32 {
newBiaseBuffer = device.makeBuffer(bytes: newBiase, length: param.bias.buffer.length)!
newScaleBuffer = device.makeBuffer(bytes: newScale, length: param.scale.buffer.length)!
} else if computePrecision == .Float32 {
} else if computePrecision == .Float16 {
newBiaseBuffer = device.makeBuffer(length: param.bias.buffer.length / 2)!
newScaleBuffer = device.makeBuffer(length: param.bias.buffer.length / 2)!
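The two precision branches in ConvAddBatchNormReluKernel were swapped: the branch that copies the folded bias and scale arrays into full-size buffers byte-for-byte is now guarded by .Float32, and the half-size allocation path (filled later by a float32-to-float16 conversion) by .Float16. A minimal sketch of the corrected branching, with illustrative names; the real kernel also performs the narrowing conversion into the half buffers:

import Metal

enum Precision { case Float32, Float16 }

// Illustrative only: size and fill the bias buffer according to compute precision.
func makeBiasBuffer(device: MTLDevice, bias: [Float], precision: Precision) -> MTLBuffer? {
    let fullLength = bias.count * MemoryLayout<Float>.stride
    switch precision {
    case .Float32:
        // Host floats can be copied into the buffer directly.
        return device.makeBuffer(bytes: bias, length: fullLength)
    case .Float16:
        // Half the bytes; the actual float32 -> float16 conversion lives elsewhere.
        return device.makeBuffer(length: fullLength / 2)
    }
}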
@@ -151,7 +149,6 @@ class ConvAddBatchNormReluKernel<P: PrecisionType>: Kernel, Computable, Testable
throw PaddleMobileError.predictError(message: " encode is nil")
}
encoder.setTexture(param.input.metalTexture, index: 0)
encoder.setTexture(param.output.metalTexture, index: 1)
encoder.setBytes(&metalParam, length: MemoryLayout<MetalConvParam>.size, index: 0)
......
@@ -21,6 +21,17 @@ struct SoftmaxMetalParam {
class SoftmaxKernel<P: PrecisionType>: Kernel, Computable{
required init(device: MTLDevice, param: SoftmaxParam<P>) {
param.output.initTexture(device: device, computePrecision: computePrecision)
if computePrecision == .Float32 {
super.init(device: device, inFunctionName: "softmax")
} else if computePrecision == .Float16 {
super.init(device: device, inFunctionName: "softmax_half")
} else {
fatalError()
}
}
func compute(commandBuffer: MTLCommandBuffer, param: SoftmaxParam<P>) throws {
guard let encoder = commandBuffer.makeComputeCommandEncoder() else {
throw PaddleMobileError.predictError(message: " encoder is nil")
@@ -32,19 +43,12 @@ class SoftmaxKernel<P: PrecisionType>: Kernel, Computable{
N: Int32(param.input.tensorDim[0]),
K: Int32(param.input.tensorDim[1])
)
print(" soft max param: ")
print(smp)
encoder.setBytes(&smp, length: MemoryLayout<SoftmaxMetalParam>.size, index: 0)
encoder.dispatch(computePipline: pipline, outTexture: param.output.metalTexture)
encoder.endEncoding()
}
required init(device: MTLDevice, param: SoftmaxParam<P>) {
param.output.initTexture(device: device, computePrecision: computePrecision)
if computePrecision == .Float32 {
super.init(device: device, inFunctionName: "softmax")
} else if computePrecision == .Float16 {
super.init(device: device, inFunctionName: "softmax_half")
} else {
fatalError()
}
}
}
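SoftmaxKernel's initializer is moved ahead of compute and selects the Metal function by precision: "softmax" for .Float32 and "softmax_half" for .Float16. What the Kernel base class presumably does with that inFunctionName is look the function up in a Metal library and build a compute pipeline; a stand-alone sketch of that step — assumed, not the repo's Kernel implementation, which may load the library from the framework bundle:

import Metal

// Illustrative only: pick the shader by precision and build a pipeline state.
func makeSoftmaxPipeline(device: MTLDevice, halfPrecision: Bool) throws -> MTLComputePipelineState {
    let functionName = halfPrecision ? "softmax_half" : "softmax"
    guard let library = device.makeDefaultLibrary(),
          let function = library.makeFunction(name: functionName) else {
        fatalError("Metal function \(functionName) not found")
    }
    return try device.makeComputePipelineState(function: function)
}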
@@ -52,7 +52,9 @@ class SoftmaxOp<P: PrecisionType>: Operator<SoftmaxKernel<P>, SoftmaxParam<P>>,
func delogOutput() {
print("softmax delog")
print(para.input)
print(para.output)
let originDim = para.output.originDim
let outputArray: [Float32] = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
......
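SoftmaxOp's delogOutput() now reads the output texture back as an NHWC-ordered array via realNHWC(dim:) before printing a strided view. Reading a float texture back to the CPU looks roughly like the sketch below, assuming an rgba32Float texture; the real helper also strips padding channels and reorders the channel slices, which is omitted here:

import Metal

// Illustrative only: copy a 2D float texture into a host array for inspection.
func readBack(texture: MTLTexture) -> [Float32] {
    let count = texture.width * texture.height * 4            // 4 channels per texel
    var output = [Float32](repeating: 0, count: count)
    let bytesPerRow = texture.width * 4 * MemoryLayout<Float32>.stride
    let region = MTLRegionMake2D(0, 0, texture.width, texture.height)
    output.withUnsafeMutableBytes { ptr in
        texture.getBytes(ptr.baseAddress!, bytesPerRow: bytesPerRow, from: region, mipmapLevel: 0)
    }
    return output
}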