提交 2e1cd331 编写于 作者: R ranqiu

Update dot_prod_layer

上级 aa250718
...@@ -335,6 +335,16 @@ bilinear_interp ...@@ -335,6 +335,16 @@ bilinear_interp
.. autoclass:: paddle.v2.layer.bilinear_interp .. autoclass:: paddle.v2.layer.bilinear_interp
:noindex: :noindex:
dot_prod
---------
.. autoclass:: paddle.v2.layer.dot_prod
:noindex:
out_prod
--------
.. autoclass:: paddle.v2.layer.out_prod
:noindex:
power power
----- -----
.. autoclass:: paddle.v2.layer.power .. autoclass:: paddle.v2.layer.power
......
...@@ -20,7 +20,7 @@ limitations under the License. */ ...@@ -20,7 +20,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
/** /**
* @brief A layer for computing the dot product of two vectors * @brief A layer for computing the dot product of two vectors.
* Input1: vector (batchSize * dim) * Input1: vector (batchSize * dim)
* Input2: vector (batchSize * dim) * Input2: vector (batchSize * dim)
* Output: a matrix: (batchSize * 1) * Output: a matrix: (batchSize * 1)
...@@ -46,7 +46,8 @@ bool DotProdLayer::init(const LayerMap& layerMap, ...@@ -46,7 +46,8 @@ bool DotProdLayer::init(const LayerMap& layerMap,
Layer::init(layerMap, parameterMap); Layer::init(layerMap, parameterMap);
CHECK_EQ(inputLayers_.size(), 2U); CHECK_EQ(inputLayers_.size(), 2U);
CHECK_EQ(1, getSize()) << "Dimension mismatch"; CHECK_EQ(1UL, getSize())
<< "The output dimensionality of this layer should be fixed to 1.";
return true; return true;
} }
...@@ -59,6 +60,7 @@ void DotProdLayer::forward(PassType passType) { ...@@ -59,6 +60,7 @@ void DotProdLayer::forward(PassType passType) {
size_t batchSize = inV0->getHeight(); size_t batchSize = inV0->getHeight();
CHECK_EQ(inV1->getHeight(), batchSize); CHECK_EQ(inV1->getHeight(), batchSize);
CHECK_EQ(inV0->getWidth(), inV1->getWidth());
{ {
REGISTER_TIMER_INFO("FwResetTimer", getName().c_str()); REGISTER_TIMER_INFO("FwResetTimer", getName().c_str());
......
...@@ -1092,7 +1092,7 @@ TEST(Layer, DotProdLayer) { ...@@ -1092,7 +1092,7 @@ TEST(Layer, DotProdLayer) {
config.layerConfig.add_inputs(); config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) { for (auto useGpu : {false, true}) {
testLayerGrad(config, "dot_prod", 100, false, useGpu); testLayerGrad(config, "dot_prod", 10, false, useGpu);
} }
} }
......
...@@ -3214,7 +3214,10 @@ class DotProdLayer(LayerBase): ...@@ -3214,7 +3214,10 @@ class DotProdLayer(LayerBase):
def __init__(self, name, inputs, device=None): def __init__(self, name, inputs, device=None):
super(DotProdLayer, self).__init__( super(DotProdLayer, self).__init__(
name, 'dot_prod', 0, inputs, device=device) name, 'dot_prod', 0, inputs, device=device)
config_assert(len(inputs) == 2, 'DotProdLayer must have 2 inputs') config_assert(len(inputs) == 2, 'DotProdLayer must have 2 inputs.')
config_assert(
self.get_input_layer(0).size == self.get_input_layer(1).size,
"Two inputs should have the same size.")
self.set_layer_size(1) self.set_layer_size(1)
......
...@@ -10,6 +10,7 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la ...@@ -10,6 +10,7 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la
test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer
test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer test_scale_sub_region_layer) test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer test_scale_sub_region_layer
test_dot_prod_layer)
export whole_configs=(test_split_datasource) export whole_configs=(test_split_datasource)
type: "nn"
layers {
name: "vector1"
type: "data"
size: 10
active_type: ""
}
layers {
name: "vector2"
type: "data"
size: 10
active_type: ""
}
layers {
name: "__dot_prod_layer_0__"
type: "dot_prod"
size: 1
active_type: ""
inputs {
input_layer_name: "vector1"
}
inputs {
input_layer_name: "vector2"
}
}
input_layer_names: "vector1"
input_layer_names: "vector2"
output_layer_names: "__dot_prod_layer_0__"
sub_models {
name: "root"
layer_names: "vector1"
layer_names: "vector2"
layer_names: "__dot_prod_layer_0__"
input_layer_names: "vector1"
input_layer_names: "vector2"
output_layer_names: "__dot_prod_layer_0__"
is_recurrent_layer_group: false
}
from paddle.trainer_config_helpers import *

# Test network: two 10-dimensional data inputs combined by a dot-product layer.
input_a = data_layer(name='vector1', size=10)
input_b = data_layer(name='vector2', size=10)

# Row-wise dot product of the two vectors; output size is fixed to 1.
result = dot_prod_layer(input1=input_a, input2=input_b)

outputs(result)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册