Commit 804beb33 authored by jiangjiajun

add trick method for tf2fluid

Parent 538a95dd
@@ -23,6 +23,15 @@ def string(param):
     return "\'{}\'".format(param)
 
 
+def color_log(log_str):
+    try:
+        from colorama import init, Fore
+        init(autoreset=True)
+        print(Fore.RED + log_str)
+    except:
+        print(log_str)
+
+
 def get_same_padding(in_size, kernel_size, stride):
     new_size = int(math.ceil(in_size * 1.0 / stride))
     pad_size = (new_size - 1) * stride + kernel_size - in_size
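The new `color_log` helper degrades gracefully: messages print in red when `colorama` is installed and as plain text otherwise. A minimal usage sketch (the message text is illustrative, not from the commit):

```python
from x2paddle.core.util import color_log

# Red when colorama is available, plain print otherwise.
color_log("\nUnknown shape for input tensor[tensor name: \"x2paddle_input\"]")
```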
@@ -65,7 +74,7 @@ def export_paddle_param(param, param_name, dir):
 
 def init_net(param_dir="./"):
     import os
-    exe = fluid.Executor(fluid.CPUPlace())
+    exe = fluid.Executor(fluid.CUDAPlace(0))
     exe.run(fluid.default_startup_program())
 
     def if_exist(var):
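Hard-coding `fluid.CUDAPlace(0)` will raise an error on a CPU-only machine. A hedged sketch of a more portable place selection (not part of this commit):

```python
import paddle.fluid as fluid

# Sketch, not part of this commit: fall back to CPU when Paddle
# was built without CUDA support.
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() \
    else fluid.CPUPlace()
exe = fluid.Executor(place)
```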
......
@@ -14,11 +14,13 @@
 from x2paddle.core.graph import GraphNode, Graph
 from x2paddle.core.fluid_code import FluidCode
+from x2paddle.core.util import *
 from tensorflow.python.framework import tensor_util
 from tensorflow.python.platform import gfile
 from tensorflow.core.framework import attr_value_pb2
 import tensorflow as tf
 import copy as cp
+import numpy
 import sys
@@ -164,22 +166,23 @@ class TFGraph(Graph):
 
 class TFDecoder(object):
     def __init__(self, pb_model):
-        sess = tf.Session()
-        self.input_example_data = dict()
+        self.sess = tf.Session()
+        self.input_info = dict()
         with gfile.FastGFile(pb_model, 'rb') as f:
             graph_def = tf.GraphDef()
             graph_def.ParseFromString(f.read())
             input_map = self._check_input_shape(graph_def)
             self._fix_output_shape(graph_def)
-        sess.graph.as_default()
+        self.sess.graph.as_default()
         tf.import_graph_def(graph_def, name='', input_map=input_map)
 
         for node in graph_def.node:
            print(node.name, node.op, node.input)
 
-        sess.run(tf.global_variables_initializer())
+        self.sess.run(tf.global_variables_initializer())
 
-        self.tf_graph = TFGraph(sess.graph._as_graph_def(add_shapes=True)[0])
+        self.tf_graph = TFGraph(
+            self.sess.graph._as_graph_def(add_shapes=True)[0])
         self.tf_graph.build()
 
     def _fix_output_shape(self, graph):
@@ -189,6 +192,7 @@ class TFDecoder(object):
             graph.node[i].attr['_disable_call_shape_inference'].b = False
 
     def _check_input_shape(self, graph_def):
+        numpy.random.seed(13)
         graph_def = cp.deepcopy(graph_def)
         input_map = dict()
         for layer in graph_def.node:
@@ -196,19 +200,117 @@ class TFDecoder(object):
                 continue
             graph_node = TFGraphNode(layer)
             dtype = graph_node.dtype
+
+            need_define_shape = 0
             if not graph_node.get_attr("shape"):
-                sys.stderr.write(
-                    "\nUnknown shape for input tensor[tensor name: \"{}\"]\n".
-                    format(layer.name))
-                shape = input(
-                    "Please define shape of input here(e.g. None,224,224,3): ")
+                need_define_shape = 1
+            else:
+                value = graph_node.layer.attr["shape"].shape
+                shape = [dim.size for dim in value.dim]
+                if shape.count(-1) > 1:
+                    need_define_shape = 2
+
+            if need_define_shape > 0:
+                if need_define_shape == 1:
+                    color_log(
+                        "\nUnknown shape for input tensor[tensor name: \"{}\"]".
+                        format(layer.name))
+                else:
+                    color_log(
+                        "\nShape[now is {}] for input tensor[tensor name: \"{}\"] not supported yet"
+                        .format(shape, layer.name))
+                color_log(
+                    "Use your keyboard to type the shape of the input tensor below :)")
+
+                right_shape_been_input = False
+                while not right_shape_been_input:
+                    shape = input("Shape of Input(e.g. None,224,224,3): ")
+                    if shape.count("None") > 1:
+                        color_log("Only 1 dimension can be None, type again :)")
+                    else:
+                        right_shape_been_input = True
+
                 shape = [
                     None if dim == "None" else int(dim)
                     for dim in shape.strip().split(',')
                 ]
-                assert shape.count(None) <= 1, "Only one dimension can be None"
                 x2paddle_input = tf.placeholder(dtype=dtype,
                                                 shape=shape,
                                                 name="x2paddle_{}".format(
                                                     layer.name))
                 input_map["{}:0".format(layer.name)] = x2paddle_input
+                if shape.count(None) > 0:
+                    shape[shape.index(None)] = -1
+                self.input_info["x2paddle_{}".format(layer.name)] = (shape,
+                                                                     dtype)
+            else:
+                value = graph_node.layer.attr["shape"].shape
+                shape = [dim.size for dim in value.dim]
+                self.input_info[graph_node.layer_name] = (shape, dtype)
+
         return input_map
+
+    # trick method
+    # should be removed after PaddlePaddle V1.6 has been released
+    def infer_tensor(self, graph_node):
+        print("========== Use infer_tensor for tensor: ",
+              graph_node.layer.name)
+        if hasattr(graph_node, "index"):
+            tensor_name = graph_node.layer.name + ":{}".format(
+                graph_node.index)
+        else:
+            tensor_name = graph_node.layer.name + ":0"
+        feed = dict()
+        for input_name, info in self.input_info.items():
+            (shape, dtype) = cp.deepcopy(info)
+            input_tensor = self.sess.graph.get_tensor_by_name(input_name +
+                                                              ":0")
+            if shape.count(-1) > 0:
+                # fix the unknown (batch) dimension to 2 for the probe run
+                shape[shape.index(-1)] = 2
+            feed[input_tensor] = numpy.random.random_sample(shape)
+        output_tensor = self.sess.graph.get_tensor_by_name(tensor_name)
+        return self.sess.run([output_tensor], feed)[0]
+
+    def infer_shape_tensor(self, graph_node, out_shape=None):
+        print("========== Use infer_shape_tensor for tensor: ",
+              graph_node.layer.name)
+        if hasattr(graph_node, "index"):
+            tensor_name = graph_node.layer.name + ":{}".format(
+                graph_node.index)
+        else:
+            tensor_name = graph_node.layer.name + ":0"
+        feed = dict()
+        # probe the graph at three batch sizes to expose batch-dependent dims
+        batch_size = [2, 3, 5]
+        results = list()
+        for b in batch_size:
+            for input_name, info in self.input_info.items():
+                (shape, dtype) = cp.deepcopy(info)
+                input_tensor = self.sess.graph.get_tensor_by_name(input_name +
+                                                                  ":0")
+                if shape.count(-1) > 0:
+                    shape[shape.index(-1)] = b
+                feed[input_tensor] = numpy.random.random_sample(shape)
+            output_tensor = self.sess.graph.get_tensor_by_name(tensor_name)
+            results.append(self.sess.run([output_tensor], feed)[0].flatten())
+
+        compare01 = (results[0] == results[1])
+        compare12 = (results[1] == results[2])
+
+        if compare01.all() and compare12.all():
+            # identical across batch sizes: the value is fully static
+            return results[0].tolist()
+
+        if (compare01 == compare12).all():
+            # the same single position varies: treat it as the batch dim
+            index = numpy.argwhere(compare01 == False).flatten()
+            if index.shape[0] != 1:
+                raise Exception("There's more than one unstable dimension")
+            results[0][index[0]] = -1
+
+            index = numpy.argwhere(results[0] < 0).flatten()
+            if index.shape[0] > 2:
+                print("Warning: more than two dimensions are less than zero")
+            if index.shape[0] == 2 and out_shape is not None:
+                if out_shape[index[1]] > 0:
+                    results[0][index[1]] = out_shape[index[1]]
+                else:
+                    results[0][index[0]] = out_shape[index[0]]
+            return results[0].tolist()
+        else:
+            raise Exception(
+                "Couldn't infer a stable value for the shape tensor")
@@ -21,6 +21,7 @@ import numpy
 
 class TFOpMapper(OpMapper):
     def __init__(self, decoder):
         super(TFOpMapper, self).__init__()
+        self.decoder = decoder
         self.graph = decoder.tf_graph
         self.weights = dict()
         self.omit_nodes = list()
@@ -162,13 +163,6 @@ class TFOpMapper(OpMapper):
 
     def RealDiv(self, node):
         self.elementwise_operator(node, "elementwise_div")
-        # x = self.graph.get_node(node.layer.input[0], copy=True)
-        # y = self.graph.get_node(node.layer.input[1], copy=True)
-        # inputs = {'x': x, 'y': y}
-        # node.fluid_code.add_layer("elementwise_div",
-        #                           inputs=inputs,
-        #                           output=node,
-        #                           param_attr=None)
 
     def Relu(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
@@ -204,7 +198,11 @@ class TFOpMapper(OpMapper):
 
     def MaxPool(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
+
         in_shape = input.out_shapes[0]
+        if in_shape.count(-1) > 2:
+            in_shape = self.decoder.infer_tensor(input).shape
+
         k_size = node.get_attr("ksize")
         strides = node.get_attr("strides")
         data_format = node.get_attr("data_format").decode()
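The `infer_tensor` fallback exists because SAME padding can only be computed from concrete sizes; with an unknown dimension of -1, the padding arithmetic silently produces a wrong answer. A small demonstration (not part of the commit; the return form of `get_same_padding` beyond the lines shown above is assumed):

```python
import math

# get_same_padding, following x2paddle/core/util.py; the final
# return statement is assumed here for illustration.
def get_same_padding(in_size, kernel_size, stride):
    new_size = int(math.ceil(in_size * 1.0 / stride))
    pad_size = (new_size - 1) * stride + kernel_size - in_size
    return [pad_size // 2, pad_size - pad_size // 2]

print(get_same_padding(-1, 3, 2))   # [1, 1] -- nonsense for an unknown size
print(get_same_padding(56, 3, 2))   # [0, 1] -- correct once the size is known
```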
@@ -257,8 +255,16 @@ class TFOpMapper(OpMapper):
         assert kernel.layer_type == "Const", "Kernel of Conv2D should be Const"
         self.omit_nodes.append(kernel.layer_name)
 
+        node.fluid_code.add_note("#{} : {}".format(node.layer.name,
+                                                   node.layer_name))
+
         in_shape = input.out_shapes[0]
+        if in_shape.count(-1) > 2:
+            in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
+        if k_size.count(-1) > 2:
+            k_size = self.decoder.infer_tensor(kernel).shape
+
         strides = node.get_attr("strides")
         dilations = node.get_attr("dilations")
         data_format = node.get_attr("data_format").decode()
@@ -321,6 +327,8 @@ class TFOpMapper(OpMapper):
         beta = self.graph.get_node(node.layer.input[2], copy=True)
         moving_mean = self.graph.get_node(node.layer.input[3], copy=True)
         moving_var = self.graph.get_node(node.layer.input[4], copy=True)
+        data_format = node.get_attr("data_format").decode()
+        channel_first = data_format == "NCHW"
 
         assert gamma.layer_type == "Const"
         assert beta.layer_type == "Const"
@@ -331,10 +339,17 @@ class TFOpMapper(OpMapper):
         self.omit_nodes.append(moving_mean.layer_name)
         self.omit_nodes.append(moving_var.layer_name)
 
+        if not channel_first:
+            attr = {"perm": [0, 3, 1, 2]}
+            node.fluid_code.add_layer("transpose",
+                                      inputs=input,
+                                      output=node,
+                                      param_attr=attr)
+
         attr = {
             "epsilon": node.get_attr("epsilon"),
             "param_attr": string(gamma.layer_name),
-            "data_layout": string(node.get_attr("data_format").decode()),
+            # "data_layout": string(node.get_attr("data_format").decode()),
             "bias_attr": string(beta.layer_name),
             "moving_mean_name": string(moving_mean.layer_name),
             "moving_variance_name": string(moving_var.layer_name),
@@ -342,7 +357,14 @@ class TFOpMapper(OpMapper):
         }
         node.fluid_code.add_layer("batch_norm",
-                                  inputs=input,
+                                  inputs=input if channel_first else node,
                                   output=node,
                                   param_attr=attr)
 
+        if not channel_first:
+            attr = {"perm": [0, 2, 3, 1]}
+            node.fluid_code.add_layer("transpose",
+                                      inputs=node,
+                                      output=node,
+                                      param_attr=attr)
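With the `data_layout` attribute commented out, NHWC inputs are handled by transposing to NCHW, running `batch_norm`, and transposing back. A numpy sketch (not part of the commit) showing why the round trip is equivalent to a channelwise op applied directly in NHWC:

```python
import numpy as np

x = np.random.rand(2, 56, 56, 16)        # NHWC input
gamma = np.random.rand(16)               # per-channel scale

nchw = x.transpose(0, 3, 1, 2)           # perm [0, 3, 1, 2]
y = nchw * gamma[None, :, None, None]    # channelwise op in NCHW
y = y.transpose(0, 2, 3, 1)              # perm [0, 2, 3, 1], back to NHWC

assert np.allclose(y, x * gamma)         # identical to operating in NHWC
```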
@@ -352,8 +374,16 @@ class TFOpMapper(OpMapper):
         assert kernel.layer_type == "Const", "Kernel of DepthwiseConv2DNative should be Const"
         self.omit_nodes.append(kernel.layer_name)
 
+        node.fluid_code.add_note("#{} : {}".format(node.layer.name,
+                                                   node.layer_name))
+
         in_shape = input.out_shapes[0]
+        if in_shape.count(-1) > 2:
+            in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
+        if k_size.count(-1) > 2:
+            k_size = self.decoder.infer_tensor(kernel).shape
+
         strides = node.get_attr("strides")
         dilations = node.get_attr("dilations")
         data_format = node.get_attr("data_format").decode()
@@ -418,6 +448,11 @@ class TFOpMapper(OpMapper):
             self.omit_nodes.append(param.layer_name)
         else:
-            # Here is a trick method to solove tensor parameter in tensorflow
-            assert len(param.out_shapes[0]
-                       ) == 1, "Unexpected situation of shape parameter"
-            attr = {"shape": [-1]}
+            # Here is a trick method to solve the tensor shape parameter in
+            # tensorflow: run the graph and read off the concrete shape value
+            shape = self.decoder.infer_shape_tensor(param, node.out_shapes[0])
+            if shape.count(-1) <= 1:
+                attr = {"shape": shape}
+                self.omit_nodes.append(param.layer_name)
+            else:
+                assert len(param.out_shapes[0]
+                           ) == 1, "Unexpected situation of shape parameter"
+                attr = {"shape": [-1]}
@@ -440,35 +475,34 @@ class TFOpMapper(OpMapper):
                                   output=node,
                                   param_attr=attr)
 
         # temporary shape inference fix
-        if param.layer_type == "Pack":
-            shape_slices = list()
-            for i in range(len(param.layer.input)):
-                slice = self.graph.get_node(param.layer.input[i], copy=True)
-                if slice.layer_type == "Const":
-                    shape_slices.append(slice.value.tolist())
-                else:
-                    shape_slices.append(0)
-            if shape_slices.count(-1) == 0:
-                shape_slices[shape_slices.index(0)] = -1
-            attr = {"shape": shape_slices}
-            node.fluid_code.add_layer("reshape",
-                                      inputs=node,
-                                      output=node,
-                                      param_attr=attr)
+        # if param.layer_type == "Pack":
+        #     shape_slices = list()
+        #     for i in range(len(param.layer.input)):
+        #         slice = self.graph.get_node(param.layer.input[i], copy=True)
+        #         if slice.layer_type == "Const":
+        #             shape_slices.append(slice.value.tolist())
+        #         else:
+        #             shape_slices.append(0)
+        #     if shape_slices.count(-1) == 0:
+        #         shape_slices[shape_slices.index(0)] = -1
+        #     attr = {"shape": shape_slices}
+        #     node.fluid_code.add_layer("reshape",
+        #                               inputs=node,
+        #                               output=node,
+        #                               param_attr=attr)
 
     def Add(self, node):
         self.elementwise_operator(node, "elementwise_add")
-        # x = self.graph.get_node(node.layer.input[0], copy=True)
-        # y = self.graph.get_node(node.layer.input[1], copy=True)
-        # inputs = {"x": x, "y": y}
-        # node.fluid_code.add_layer("elementwise_add",
-        #                           inputs=inputs,
-        #                           output=node,
-        #                           param_attr=None)
 
     def AvgPool(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
+
         in_shape = input.out_shapes[0]
+        if in_shape.count(-1) > 2:
+            in_shape = self.decoder.infer_tensor(input).shape
+
         k_size = node.get_attr("ksize")
         strides = node.get_attr("strides")
         data_format = node.get_attr("data_format").decode()
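The `shape.count(-1) <= 1` check in the Reshape mapper mirrors the usual reshape contract: one unknown dimension can be inferred from the element count, two cannot. A quick numpy illustration (not part of the commit):

```python
import numpy as np

x = np.zeros((2, 7, 7, 512))
print(x.reshape([-1, 7 * 7 * 512]).shape)   # (2, 25088): one -1 is inferable

try:
    x.reshape([-1, -1, 512])                # two -1s are ambiguous
except ValueError as err:
    print(err)                              # can only specify one unknown dimension
```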
@@ -524,13 +558,6 @@ class TFOpMapper(OpMapper):
 
     def Maximum(self, node):
         self.elementwise_operator(node, "elementwise_max")
-        # x = self.graph.get_node(node.layer.input[0], copy=True)
-        # y = self.graph.get_node(node.layer.input[1], copy=True)
-        # inputs = {"x": x, "y": y}
-        # node.fluid_code.add_layer("elementwise_max",
-        #                           inputs=inputs,
-        #                           output=node,
-        #                           param_attr=None)
 
     def SplitV(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
@@ -602,9 +629,6 @@ class TFOpMapper(OpMapper):
                                   output=node,
                                   param_attr=attr)
 
-    # def ResizeNearestNeighbor(self, node):
-    #     pass
-
     def Range(self, node):
         start = self.graph.get_node(node.layer.input[0], copy=True)
         limit = self.graph.get_node(node.layer.input[1], copy=True)
@@ -625,28 +649,11 @@ class TFOpMapper(OpMapper):
                                   output=node,
                                   param_attr=None)
 
-    # def Fill(self, node):
-    #     shape = self.graph.get_node(node.layer
-
     def Mul(self, node):
         self.elementwise_operator(node, "elementwise_mul")
-        # x = self.graph.get_node(node.layer.input[0], copy=True)
-        # y = self.graph.get_node(node.layer.input[1], copy=True)
-        # inputs = {"x": x, "y": y}
-        # node.fluid_code.add_layer("elementwise_mul",
-        #                           inputs=inputs,
-        #                           output=node,
-        #                           param_attr=None)
 
     def Sub(self, node):
         self.elementwise_operator(node, "elementwise_sub")
-        # x = self.graph.get_node(node.layer.input[0], copy=True)
-        # y = self.graph.get_node(node.layer.input[1], copy=True)
-        # inputs = {"x": x, "y": y}
-        # node.fluid_code.add_layer("elementwise_sub",
-        #                           inputs=inputs,
-        #                           output=node,
-        #                           param_attr=None)
 
     def Rsqrt(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
@@ -684,6 +691,16 @@ class TFOpMapper(OpMapper):
         transpose_a = node.get_attr('transpose_a')
         transpose_b = node.get_attr('transpose_b')
         inputs = {"x": x, "y": y}
+        # fix a Paddle shape inference problem;
+        # should be removed after Paddle 1.6
+        if x.out_shapes[0][-1] < 0 and y.out_shapes[0][0] > 0:
+            shape = x.out_shapes[0]
+            shape[-1] = y.out_shapes[0][0]
+            attr = {"shape": shape}
+            node.fluid_code.add_layer("reshape",
+                                      inputs=x,
+                                      output=x,
+                                      param_attr=attr)
         attr = {"transpose_x": transpose_a, "transpose_y": transpose_b}
         node.fluid_code.add_layer("matmul",
                                   inputs=inputs,
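The MatMul workaround recovers a dimension that pre-1.6 Paddle shape inference loses: `matmul` requires `x.shape[-1] == y.shape[0]`, so when x's last dimension is unknown it can be copied from y. A toy illustration (not part of the commit; the shapes are made up):

```python
# x: flattened features with an unknown inner dim; y: weights with known rows.
x_shape = [-1, -1]
y_shape = [1024, 10]

if x_shape[-1] < 0 and y_shape[0] > 0:
    x_shape[-1] = y_shape[0]   # matmul needs x.shape[-1] == y.shape[0]

print(x_shape)                 # [-1, 1024], a valid reshape target
```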
@@ -729,13 +746,24 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         begin = self.graph.get_node(node.layer.input[1], copy=True)
         size = self.graph.get_node(node.layer.input[2], copy=True)
-        assert begin.layer_type == "Const"
-        assert size.layer_type == "Const"
+        # assert begin.layer_type == "Const"
+        # assert size.layer_type == "Const"
         self.omit_nodes.append(begin.layer_name)
         self.omit_nodes.append(size.layer_name)
 
-        attr = {"shape": size.value.tolist(), "offsets": begin.value.tolist()}
-        node.code.add_layer("crop", inputs=input, output=node, param_attr=attr)
+        if begin.layer_type == "Const":
+            begin = begin.value.tolist()
+        else:
+            begin = self.decoder.infer_tensor(begin).tolist()
+        if size.layer_type == "Const":
+            size = size.value.tolist()
+        else:
+            size = self.decoder.infer_tensor(size).tolist()
+
+        attr = {"shape": size, "offsets": begin}
+        node.fluid_code.add_layer("crop",
+                                  inputs=input,
+                                  output=node,
+                                  param_attr=attr)
 
     def Abs(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
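For Slice, a `begin` or `size` that is computed at runtime is now evaluated once with `infer_tensor` and the resulting integers are baked into a static `crop`; this assumes the values do not depend on the actual input data. A sketch of the resulting attribute (not part of the commit; the numbers are illustrative):

```python
import numpy as np

# As if returned by decoder.infer_tensor(begin) / infer_tensor(size).
begin = np.array([0, 2, 2, 0])
size = np.array([1, 3, 3, 16])

attr = {"shape": size.tolist(), "offsets": begin.tolist()}
print(attr)   # {'shape': [1, 3, 3, 16], 'offsets': [0, 2, 2, 0]}
```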
@@ -750,8 +778,16 @@ class TFOpMapper(OpMapper):
         assert kernel.layer_type == "Const", "Kernel of Conv2DBackpropInput should be Const"
         self.omit_nodes.append(kernel.layer_name)
 
+        node.fluid_code.add_note("#{} : {}".format(node.layer.name,
+                                                   node.layer_name))
+
         in_shape = input.out_shapes[0]
+        if in_shape.count(-1) > 2:
+            in_shape = self.decoder.infer_tensor(input).shape
         k_size = kernel.out_shapes[0]
+        if k_size.count(-1) > 2:
+            k_size = self.decoder.infer_tensor(kernel).shape
+
         strides = node.get_attr("strides")
         dilations = node.get_attr("dilations")
         data_format = node.get_attr("data_format").decode()
@@ -823,14 +859,6 @@ class TFOpMapper(OpMapper):
                                   output=node,
                                   param_attr=attr)
 
-    # def GreaterEqual(self, node):
-    #     pass
-    #
-    # def RandomUniform(self, node):
-    #     pass
-    #
     def Cast(self, node):
         input = self.graph.get_node(node.layer.input[0], copy=True)
         dtype = node.dtype_map[node.get_attr('DstT')]
......