Commit 715009b5 authored by Megvii Engine Team

refactor(mge/api): remove external, dropout fix

GitOrigin-RevId: 5e6ff1a372522be2e7af85a55edf038f0520ddab
Parent 9005cf74
@@ -1226,7 +1226,7 @@ def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
    """
    assert 0 <= drop_prob < 1
-   rv = uniform(inp.shape)
+   rv = uniform(size=inp.shape)
    mask = rv > drop_prob
    inp *= mask.astype(inp.dtype)
    if training:
@@ -25,6 +25,6 @@ class Dropout(Module):
    def forward(self, inputs):
        if self.training:
-           return dropout(inputs, self.drop_prob, rescale=True)
+           return dropout(inputs, self.drop_prob, training=True)
        else:
            return inputs
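
For reference, a minimal sketch (not part of this diff) of how the fixed call is exercised; the imports of numpy, tensor, and the functional alias F are assumptions chosen to match the test file further below:

# Illustrative sketch only; the imports are assumptions, not taken from this commit.
import numpy as np
import megengine.functional as F
from megengine import tensor

x = tensor(np.ones((4, 4), dtype=np.float32))
# After the fix, the mask is drawn with uniform(size=x.shape) and the Dropout module
# forwards training=True, so roughly drop_prob of the entries are zeroed out.
y = F.dropout(x, 0.2, training=True)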
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np

from ..functional import cambricon_subgraph, extern_opr_subgraph
from .module import Module


class CambriconSubgraph(Module):
    r"""Load a serialized Cambricon subgraph.

    See :func:`~.cambricon_subgraph` for more details.
    """

    def __init__(
        self, data, symbol, tensor_dim_mutable,
    ):
        super(CambriconSubgraph, self).__init__()
        self._data = data
        self.symbol = symbol
        self.tensor_dim_mutable = tensor_dim_mutable

    @property
    def data(self):
        return self._data.tobytes()

    @data.setter
    def data(self, val):
        self._data = np.frombuffer(val, dtype=np.uint8)

    def forward(self, inputs):
        outputs = cambricon_subgraph(
            inputs, self._data, self.symbol, self.tensor_dim_mutable,
        )
        return outputs


class ExternOprSubgraph(Module):
    r"""Load a serialized extern opr subgraph."""

    def __init__(self, data, name, output_shapes):
        super(ExternOprSubgraph, self).__init__()
        self.data = data
        self.name = name
        self.output_shapes = output_shapes

    def forward(self, inputs):
        outputs = extern_opr_subgraph(inputs, self.output_shapes, self.name, self.data,)
        return outputs
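
A hypothetical usage sketch of the removed CambriconSubgraph module follows; the model file name "subnet.cambricon" and the symbol "subnet0" are illustrative assumptions, not values from this commit:

# Hypothetical sketch only; the model file and symbol name below are made up.
import numpy as np

with open("subnet.cambricon", "rb") as f:
    raw = np.frombuffer(f.read(), dtype=np.uint8)  # keep the serialized model as uint8, as the data setter does

m = CambriconSubgraph(data=raw, symbol="subnet0", tensor_dim_mutable=True)
# outputs = m(inputs)  # inputs: the subgraph's input tensors, forwarded to cambricon_subgraph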
@@ -113,6 +113,52 @@ def test_where():
    opr_test(cases, F.where, ref_fn=np.where)
def test_dropout():
    data = tensor(np.ones(10, dtype=np.float32))
    out = F.dropout(data, 1.0 / 3.0, training=False)
    assert out.numpy().sum() >= 0.0


def test_matmul():
    shape1 = 3
    shape2 = 3
    shape3 = (3, 5)
    shape4 = (5, 6)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")

    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)

    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 10, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # compare the i-th batch entry and assert, instead of discarding the comparison result
            assertTensorClose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.matmul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
def test_interpolate():
    def linear_interpolate():
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
@@ -281,48 +327,6 @@ def test_add_update_params():
    assertTensorClose(res.numpy(), b + 1)
# def test_cross_entropy_with_softmax():
#     data1_shape = (1, 2)
#     label1_shape = (1,)
#     data2_shape = (1, 3)
#     label2_shape = (1,)

#     data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
#     label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
#     expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

#     data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
#     label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
#     expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

#     cases = [
#         {"input": [data1, label1], "output": expect1,},
#         {"input": [data2, label2], "output": expect2,},
#     ]
#     opr_test(cases, F.cross_entropy_with_softmax)


# def test_cross_entropy():
#     data1_shape = (1, 2)
#     label1_shape = (1,)
#     data2_shape = (1, 3)
#     label2_shape = (1,)

#     data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
#     label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
#     expect1 = np.array([-np.log(0.5)], dtype=np.float32)

#     data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
#     label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
#     expect2 = np.array([-np.log(0.4)], dtype=np.float32)

#     cases = [
#         {"input": [data1, label1], "output": expect1,},
#         {"input": [data2, label2], "output": expect2,},
#     ]
#     opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
@@ -413,19 +417,6 @@ def test_batched_nms():
    np.testing.assert_equal(results.numpy(), np.array([1, 4, 5], dtype=np.int32))
# def test_smooth_l1_loss():
#     np.random.seed(123)
#     cases = []
#     for shape in [(2, 2), (2, 3)]:
#         data = np.random.uniform(size=shape).astype(np.float32)
#         label = np.random.uniform(size=shape).astype(np.float32)
#         diff = np.abs(data - label)
#         expect = np.where(diff < 1, 0.5 * diff ** 2, diff - 0.5).mean()
#         cases.append({"input": [data, label], "output": tensor(expect)})
#     opr_test(cases, F.smooth_l1_loss)
def test_conv_bias():
    inp_scale = 1.5
    w_scale = 2.5
@@ -203,93 +203,3 @@ def test_normalize():
    cases[0]["input"][0, 0, 0, :] = 0
    cases[1]["input"][0, 0, 0, :] = 0
    opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_matmul():
    shape1 = 3
    shape2 = 3
    shape3 = (3, 5)
    shape4 = (5, 6)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")

    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)

    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 10, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            x.numpy()[i, ...] == y

        opr_test(
            cases,
            F.matmul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
# def test_logsumexp():
#     x = np.arange(10).astype(np.float32)
#     expected = np.log(np.sum(np.exp(x)))
#     cases = [{"input": x, "output": expected}]
#     compare_fn = partial(assertTensorClose, allow_special_values=True)
#     # large value check
#     n = 100
#     x = np.full(n, 10000, dtype=np.float32)
#     expected = 10000 + np.log(n)
#     cases.append({"input": x, "output": expected.astype(np.float32)})
#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

#     # special value check
#     x = np.array([np.inf], dtype=np.float32)
#     expected = x
#     cases = [{"input": x, "output": expected}]
#     x = np.array([-np.inf, 0.0], dtype=np.float32)
#     expected = np.zeros(1).astype(np.float32)
#     cases.append({"input": x, "output": expected})
#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

#     x = np.array([np.nan], dtype=np.float32)
#     expected = x
#     cases = [{"input": x, "output": expected}]
#     x = np.array([-np.inf, 1], dtype=np.float32)
#     expected = np.array([1.0], dtype=np.float32)
#     cases.append({"input": x, "output": expected})
#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

#     # keepdims check
#     x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
#     expected = np.array([[1e10], [-1e10]], dtype=np.float32)
#     cases = [{"input": x, "output": expected}]
#     x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
#     expected = np.array([[1e10], [np.inf]], dtype=np.float32)
#     cases.append({"input": x, "output": expected})
#     opr_test(cases, F.logsumexp, axis=1, keepdims=True, compare_fn=compare_fn)

#     # multiple axes check
#     x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
#     expected = np.array([1e10], dtype=np.float32)
#     cases = [{"input": x, "output": expected}]
#     x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
#     expected = np.array([np.inf], dtype=np.float32)
#     cases.append({"input": x, "output": expected})
#     opr_test(cases, F.logsumexp, axis=(0, 1), keepdims=False, compare_fn=compare_fn)