提交 9e266cab 编写于 作者: B BBuf

fix max_pool2d bug

上级 69e0699a
...@@ -155,65 +155,35 @@ def _ConvConvertInputs( ...@@ -155,65 +155,35 @@ def _ConvConvertInputs(
def _AddPadding(ctx, node, kernel_shape, strides, dilations=None, spatial=2): def _AddPadding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
padding = node.attrs.get("padding") if dilations is None:
if padding: dilations = [1] * spatial * 2
if dilations is None: pads = [0] * spatial * 2
dilations = [1] * spatial * 2 input_shape = ctx.get_shape(node.input_tensor_names[0])
if padding == "same": output_shape = ctx.get_shape(node.output_tensor_names[0])
padding = "same_lower" # check if the input shape is valid
if padding in ["same_lower", "same_upper"]: if len(input_shape) != len(pads):
pads = [0] * spatial * 2 logger.error(
input_shape = ctx.get_shape(node.input_tensor_names[0]) "node %s input needs to be rank %d, is %d",
output_shape = ctx.get_shape(node.output_tensor_names[0]) node.name,
# check if the input shape is valid len(pads),
if len(input_shape) != len(pads): len(input_shape),
logger.error( )
"node %s input needs to be rank %d, is %d", # transpose shape to nchw
node.name, if node.is_nhwc():
len(pads), input_shape = _SpatialMap(input_shape, constants.NHWC_TO_NCHW)
len(input_shape), output_shape = _SpatialMap(output_shape, constants.NHWC_TO_NCHW)
) for i in range(spatial):
# transpose shape to nchw pad = (
if node.is_nhwc(): (output_shape[i + 2] - 1) * strides[i]
input_shape = _SpatialMap(input_shape, constants.NHWC_TO_NCHW) + dilations[i] * (kernel_shape[i] - 1)
output_shape = _SpatialMap(output_shape, constants.NHWC_TO_NCHW) + 1
# calculate pads - input_shape[i + 2]
if any( )
input_shape[i + 2] == -1 or output_shape[i + 2] == -1 pad = max(pad, 0)
for i in range(spatial) pads[i + spatial] = pad // 2
): pads[i] = pad - pad // 2
logger.debug( node.attrs["pads"] = pads
"node %s has unknown dim for pads calculation, fallback to auto_pad: "
"input_shape=%s, output_shape=%s",
node.name,
input_shape,
output_shape,
)
if padding == "same_lower":
node.attrs["auto_pad"] = "SAME_LOWER"
else:
node.attrs["auto_pad"] = "SAME_UPPER"
else:
for i in range(spatial):
pad = (
(output_shape[i + 2] - 1) * strides[i]
+ dilations[i] * (kernel_shape[i] - 1)
+ 1
- input_shape[i + 2]
)
pad = max(pad, 0)
if padding == "same_lower":
pads[i + spatial] = pad // 2
pads[i] = pad - pad // 2
else:
pads[i] = pad // 2
pads[i + spatial] = pad - pad // 2
node.attrs["pads"] = pads
elif padding == "valid":
pass
else:
raise ValueError("invalid padding value: " + padding)
def conv_dims_attr(node, name, new_name=None): def conv_dims_attr(node, name, new_name=None):
...@@ -285,8 +255,8 @@ class PoolOp: ...@@ -285,8 +255,8 @@ class PoolOp:
# T Y = MaxPool(T X, @AttrType.STRING auto_pad, @AttrType.INTS kernel_shape, @AttrType.INTS pads, # T Y = MaxPool(T X, @AttrType.STRING auto_pad, @AttrType.INTS kernel_shape, @AttrType.INTS pads,
# @AttrType.INTS strides) # @AttrType.INTS strides)
if len(node.input_tensor_names) < 3: if len(node.input_tensor_names) < 3:
kernel_shape_flow = node.attrs["pool_size"] kernel_shape_flow = node.attrs["kernel_size"]
strides_flow = node.attrs["strides"] strides_flow = node.attrs["stride"]
else: else:
kernel_shape_flow = node.input_nodes[1].get_tensor_value() kernel_shape_flow = node.input_nodes[1].get_tensor_value()
strides_flow = node.input_nodes[2].get_tensor_value() strides_flow = node.input_nodes[2].get_tensor_value()
......
...@@ -859,7 +859,7 @@ class Graph(object): ...@@ -859,7 +859,7 @@ class Graph(object):
tensor_name = node.output_tensor_names[0] tensor_name = node.output_tensor_names[0]
# TODO(daquexian): node.output_tensor_names[0] is "node_name/output_name", so this pathjoin doesn't work # TODO(daquexian): node.output_tensor_names[0] is "node_name/output_name", so this pathjoin doesn't work
# on windows (where path separator is "\") # on windows (where path separator is "\")
path = pathjoin(self._model_save_dir, node.output_tensor_names[0][2:]) path = pathjoin(self._model_save_dir, ".".join(node.output_tensor_names[0].split(".")[1:]))
tensor_value = np.fromfile( tensor_value = np.fromfile(
path, dtype=util.Onnx2NumpyDtype(self.get_dtype(tensor_name)) path, dtype=util.Onnx2NumpyDtype(self.get_dtype(tensor_name))
).reshape(self.get_shape(tensor_name)) ).reshape(self.get_shape(tensor_name))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册