Commit 1e5520f7 authored by liuruilong

update log

Parent 7a455b07
@@ -79,7 +79,7 @@ class ViewController: UIViewController {
       return
     }
     do {
-      let max = 10
+      let max = 1
       let startDate = Date.init()
       for i in 0..<max {
         try net.predict(inTexture: inTexture) { [weak self] (result) in
......
@@ -111,6 +111,11 @@ public class Executor<P: PrecisionType> {
       buffer.addCompletedHandler { (commandbuffer) in
+//        let inputArr = resInput.floatArray(res: { (p:P) -> P in
+//          return p
+//        })
+//        print(inputArr.strideArray())
+//
 //        let inputArr = resInput.floatArray(res: { (p:P) -> P in
 //          return p
 //        })
@@ -124,12 +129,12 @@ public class Executor<P: PrecisionType> {
 //        print(stridableInput)
 //        let _: Flo? = input.logDesc(header: "input: ", stridable: true)
-//        for i in 0..<self.ops.count {
-//          let op = self.ops[i]
-//          print(" 第 \(i) 个 op: ")
-//          op.delogOutput()
-//        }
+        for i in 0..<self.ops.count {
+          let op = self.ops[i]
+          print(" 第 \(i) 个 op: ")
+          op.delogOutput()
+        }
 //
 //        return
         let afterDate = Date.init()
......
@@ -76,10 +76,9 @@ class BoxcoderOp<P: PrecisionType>: Operator<BoxcoderKernel<P>, BoxcoderParam<P>
     print(targetBoxArray.strideArray())
     let originDim = para.output.originDim
     let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
-    print(" output ")
     print(outputArray.strideArray())
   }
 }
......
@@ -65,10 +65,16 @@ class ConcatOp<P: PrecisionType>: Operator<ConcatKernel<P>, ConcatParam<P>>, Run
   func delogOutput() {
     print(" \(type) output: ")
     let originDim = para.output.originDim
-    let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
-    print(outputArray.strideArray())
-    print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
+    if para.output.transpose == [0, 1, 2, 3] {
+      let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
+      print(outputArray.strideArray())
+    } else if para.output.transpose == [0, 2, 3, 1] {
+      print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
+    } else {
+      fatalError()
+    }
   }
 }
......
@@ -125,13 +125,13 @@ class ConvAddBatchNormReluOp<P: PrecisionType>: Operator<ConvAddBatchNormReluKer
 //    let _: P? = para.newBiase?.logDesc(header: "new biase: ", stridable: false)
 //    let _: P? = para.newScale?.logDesc(header: "new scale: ", stridable: false)
-    let output = para.output.metalTexture.floatArray { (p: P) -> P in
-      return p
-    }
-    //
-    writeToLibrary(fileName: "output_112x112x32_2", array: output)
-    print(" write done")
-
-//    let _: P? = para.output.metalTexture.logDesc(header: "conv add batchnorm relu output: ", stridable: false)
+//    let output = para.output.metalTexture.floatArray { (p: P) -> P in
+//      return p
+//    }
+//    //
+//    writeToLibrary(fileName: "output_112x112x32_2", array: output)
+//    print(" write done")
+//
+//    //    let _: P? = para.output.metalTexture.logDesc(header: "conv add batchnorm relu output: ", stridable: false)
   }
 }
@@ -60,9 +60,8 @@ class FeedOp<P: PrecisionType>: Operator<Texture2DTo2DArrayKernel<P>, FeedParam<
   }
   func delogOutput() {
-//    para.input.mtlTexture.logDesc()
-//    let _: P? = para.input.mtlTexture.logDesc(header: "feed input: ", stridable: true)
-//    let _: P? = para.output.metalTexture.logDesc(header: "feed output: ", stridable: false)
+    print(" \(type) output: ")
+    print(para.output.metalTexture.toTensor(dim: (n: para.output.originDim[0], c: para.output.originDim[1], h: para.output.originDim[2], w: para.output.originDim[3])).strideArray())
   }
 }
@@ -769,3 +769,4 @@ kernel void depthwise_conv_batch_norm_relu_3x3(texture2d_array<float, access::sa
   outTexture.write(output, gid.xy, gid.z);
 }
@@ -48,9 +48,15 @@ class TransposeOp<P: PrecisionType>: Operator<TransposeKernel<P>, TransposeParam
   func delogOutput() {
     print(" \(type) output: ")
-    let originDim = para.output.tensorDim
-    let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
-    print(outputArray.strideArray())
+    let originDim = para.output.originDim
+    if para.output.transpose == [0, 1, 2, 3] {
+      let outputArray = para.output.metalTexture.realNHWC(dim: (n: originDim[0], h: originDim[1], w: originDim[2], c: originDim[3]))
+      print(outputArray.strideArray())
+    } else if para.output.transpose == [0, 2, 3, 1] {
+      print(para.output.metalTexture.toTensor(dim: (n: para.output.tensorDim[0], c: para.output.tensorDim[1], h: para.output.tensorDim[2], w: para.output.tensorDim[3])).strideArray())
+    } else {
+      print(" not implement")
+    }
   }
 }
......
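
Note: ConcatOp and TransposeOp now share the same layout-aware dump logic in delogOutput: a texture stored as [0, 1, 2, 3] is read back through realNHWC, while one stored as [0, 2, 3, 1] is read back through toTensor. A minimal self-contained sketch of that branching follows; DumpableTexture and its realNHWC/toTensor readbacks are stand-ins for the real paddle-mobile Texture API, not the actual types.

// Sketch only: DumpableTexture is a hypothetical stand-in for paddle-mobile's Texture.
struct DumpableTexture {
    var transpose: [Int]   // current layout of the backing Metal texture
    var originDim: [Int]   // NHWC dims of the original tensor
    var tensorDim: [Int]   // NCHW dims of the original tensor

    // Stand-ins for the realNHWC / toTensor readbacks used in the diff.
    func realNHWC(n: Int, h: Int, w: Int, c: Int) -> [Float] { return [] }
    func toTensor(n: Int, c: Int, h: Int, w: Int) -> [Float] { return [] }
}

// Same branching the diff adds to ConcatOp.delogOutput and TransposeOp.delogOutput:
// pick the readback that matches the texture's current transpose layout.
func delogOutput(_ output: DumpableTexture, type: String) {
    print(" \(type) output: ")
    let dim = output.originDim
    if output.transpose == [0, 1, 2, 3] {
        // Already NHWC: dump directly.
        print(output.realNHWC(n: dim[0], h: dim[1], w: dim[2], c: dim[3]))
    } else if output.transpose == [0, 2, 3, 1] {
        // NCHW-backed layout: dump through the tensor-shaped path.
        let t = output.tensorDim
        print(output.toTensor(n: t[0], c: t[1], h: t[2], w: t[3]))
    } else {
        print(" not implement")  // TransposeOp logs here; ConcatOp calls fatalError()
    }
}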