Commit 669da3b0 authored by liuruilong

update log

Parent dd73f6b6
@@ -79,7 +79,7 @@ class ViewController: UIViewController {
return
}
do {
let max = 10
let max = 1
let startDate = Date.init()
for i in 0..<max {
try net.predict(inTexture: inTexture) { [weak self] (result) in
......
@@ -111,6 +111,11 @@ public class Executor<P: PrecisionType> {
buffer.addCompletedHandler { (commandbuffer) in
// let inputArr = resInput.floatArray(res: { (p:P) -> P in
// return p
// })
// print(inputArr.strideArray())
//
// let inputArr = resInput.floatArray(res: { (p:P) -> P in
// return p
// })
@@ -124,11 +129,11 @@ public class Executor<P: PrecisionType> {
// print(stridableInput)
// let _: Flo? = input.logDesc(header: "input: ", stridable: true)
// for i in 0..<self.ops.count {
// let op = self.ops[i]
// print(" 第 \(i) 个 op: ")
// op.delogOutput()
// }
for i in 0..<self.ops.count {
let op = self.ops[i]
print(" 第 \(i) 个 op: ")
op.delogOutput()
}
//
// return
......
@@ -76,10 +76,9 @@ class BoxcoderOp<P: PrecisionType>: Operator<BoxcoderKernel<P>, BoxcoderParam<P>
print(targetBoxArray.strideArray())
let originDim = para.output.originDim
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(" output ")
print(outputArray.strideArray())
}
}
......
@@ -65,10 +65,16 @@ class ConcatOp<P: PrecisionType>: Operator<ConcatKernel<P>, ConcatParam<P>>, Run
func delogOutput() {
print(" \(type) output: ")
let originDim = para.output.originDim
if para.output.transpose == [0, 1, 2, 3] {
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
} else if para.output.transpose == [0, 2, 3, 1] {
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
} else {
fatalError()
}
}
}
......
@@ -125,13 +125,13 @@ class ConvAddBatchNormReluOp<P: PrecisionType>: Operator<ConvAddBatchNormReluKer
// let _: P? = para.newBiase?.logDesc(header: "new biase: ", stridable: false)
// let _: P? = para.newScale?.logDesc(header: "new scale: ", stridable: false)
let output = para.output.metalTexture.floatArray { (p: P) -> P in
return p
}
//
writeToLibrary(fileName: "output_112x112x32_2", array: output)
print(" write done")
// let _: P? = para.output.metalTexture.logDesc(header: "conv add batchnorm relu output: ", stridable: false)
// let output = para.output.metalTexture.floatArray { (p: P) -> P in
// return p
// }
// //
// writeToLibrary(fileName: "output_112x112x32_2", array: output)
// print(" write done")
//
// // let _: P? = para.output.metalTexture.logDesc(header: "conv add batchnorm relu output: ", stridable: false)
}
}
@@ -60,9 +60,8 @@ class FeedOp<P: PrecisionType>: Operator<Texture2DTo2DArrayKernel<P>, FeedParam<
}
func delogOutput() {
// para.input.mtlTexture.logDesc()
// let _: P? = para.input.mtlTexture.logDesc(header: "feed input: ", stridable: true)
// let _: P? = para.output.metalTexture.logDesc(header: "feed output: ", stridable: false)
print(" \(type) output: ")
print(para.output.metalTexture.toTensor(dim: (n: para.output.originDim[0], c: para.output.originDim[1], h: para.output.originDim[2], w: para.output.originDim[3])).strideArray())
}
}
@@ -769,3 +769,4 @@ kernel void depthwise_conv_batch_norm_relu_3x3(texture2d_array<float, access::sa
outTexture.write(output, gid.xy, gid.z);
}
@@ -48,9 +48,15 @@ class TransposeOp<P: PrecisionType>: Operator<TransposeKernel<P>, TransposeParam
func delogOutput() {
print(" \(type) output: ")
let originDim = para.output.tensorDim
let originDim = para.output.originDim
if para.output.transpose == [0, 1, 2, 3] {
let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
print(outputArray.strideArray())
} else if para.output.transpose == [0, 2, 3, 1] {
print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
} else {
print(" not implement")
}
}
}
......
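Note: the delogOutput implementations touched in this commit all follow the same debug pattern: read the op's output texture back to the CPU in a layout that matches its transpose order, then print a strided sample of the values. Below is a minimal sketch of that pattern, not part of the commit itself; it assumes the Texture type and the helpers that appear in the hunks above (metalTexture, originDim, tensorDim, transpose, realNHWC, toTensor, strideArray), and the function name is illustrative only.

// Illustrative sketch, not part of this commit: the shared delogOutput pattern.
// `Texture<P>` and its helpers are assumed to be the ones used by the ops above.
func debugPrintOutput<P: PrecisionType>(_ label: String, output: Texture<P>) {
    print(" \(label) output: ")
    if output.transpose == [0, 1, 2, 3] {
        // Data kept in its original layout: read it back as NHWC via originDim.
        let d = output.originDim
        print(output.metalTexture.realNHWC(dim: (n: d[0], h: d[1], w: d[2], c: d[3])).strideArray())
    } else if output.transpose == [0, 2, 3, 1] {
        // Data transposed to NHWC on the GPU: convert back to an NCHW tensor via tensorDim.
        let t = output.tensorDim
        print(output.metalTexture.toTensor(dim: (n: t[0], c: t[1], h: t[2], w: t[3])).strideArray())
    } else {
        // Other transpose orders are not handled by the logging code in this commit.
        print(" not implement")
    }
}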