Commit 8cc9d887 authored by Travis CI

Deploy to GitHub Pages: bfe6dcb5

Parent: d35afb26
@@ -55,17 +55,23 @@ Let us consolidate the discussion by presenting some examples.
The following C++ program shows how blocks are used with the `if-else` structure:
```c++
namespace pd = paddle;

int x = 10;
int y = 1;
int z = 10;
bool cond = false;
int o1, o2;

if (cond) {
  int z = x + y;  // a new z, local to this block, shadows the outer z
  o1 = z;
  o2 = pd::layer::softmax(z);
} else {
  int d = pd::layer::fc(z);
  o1 = d;
  o2 = d + 1;
}
```
An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows:
@@ -73,57 +79,55 @@ An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator
```python
import paddle as pd

x = minibatch([10, 20, 30])  # shape=[None, 1]
y = var(1)                   # shape=[1], value=1
z = minibatch([10, 20, 30])  # shape=[None, 1]
cond = larger_than(x, 15)    # [false, true, true]

ie = pd.ifelse()
with ie.true_block():
    d = pd.layer.add_scalar(x, y)
    ie.output(d, pd.layer.softmax(d))
with ie.false_block():
    d = pd.layer.fc(z)
    ie.output(d, d + 1)
o1, o2 = ie(cond)
```
In both examples, the left branch computes `x+y` and `softmax(x+y)`, while the right branch computes `fc(z)` and `fc(z)+1`.
A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.input(true, 0)` invocation returns, as the local variable `x`, the instances in the 0-th input that correspond to true values in `cond`, whereas `ie.input(false, 0)` returns the instances corresponding to false values.
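To make the mini-batch semantics concrete, here is a small NumPy sketch, purely illustrative and not PaddlePaddle API (`ifelse_minibatch` and the two branch lambdas are hypothetical): it gathers the instances selected by `cond`, applies each branch to its own partition, and scatters the branch outputs back into batch order.

```python
import numpy as np

def ifelse_minibatch(x, cond, true_fn, false_fn):
    """Gather instances by cond, run one branch per partition,
    then scatter the branch outputs back into the original order."""
    out = np.empty_like(x)
    if cond.any():
        out[cond] = true_fn(x[cond])      # instances with cond == True
    if (~cond).any():
        out[~cond] = false_fn(x[~cond])   # instances with cond == False
    return out

x = np.array([10.0, 20.0, 30.0])
cond = x > 15                             # [False, True, True]
out = ifelse_minibatch(x, cond, lambda t: t + 1.0, lambda f: f * 2.0)
print(out)                                # [20. 21. 31.]
```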
### Blocks with `for` and `RNNOp`
The following RNN model from the [RNN design doc](./rnn.md)
```python
x = sequence([10, 20, 30])  # shape=[None, 1]
m = var(0)                  # shape=[1]
W = var(0.314, param=True)  # shape=[1]
U = var(0.375, param=True)  # shape=[1]

rnn = pd.rnn()
with rnn.step():
    h = rnn.memory(init=m)
    hh = rnn.previous_memory(h)
    a = layer.fc(W, x)
    b = layer.fc(U, hh)
    s = pd.add(a, b)
    act = pd.sigmoid(s)
    rnn.update_memory(h, act)  # update the memory with act
    rnn.output(a, b)           # two outputs
o1, o2 = rnn()
```
has the following equivalent C++ program:
```c++
float x[] = {10, 20, 30};
float m = 0;
float W = 0.314;
float U = 0.375;

float mem[sizeof(x) / sizeof(x[0]) + 1];
float o1[sizeof(x) / sizeof(x[0]) + 1];
@@ -131,20 +135,16 @@ float o2[sizeof(x) / sizeof(x[0]) + 1];
for (int i = 1; i <= sizeof(x) / sizeof(x[0]); ++i) {
  float xi = x[i - 1];     // current input instance
  if (i == 1) mem[0] = m;  // initialize the memory
  float a = W * xi;        // input-to-hidden contribution
  float b = U * mem[i - 1];  // hidden-to-hidden contribution
  float s = a + b;
  float act = sigmoid(s);
  mem[i] = act;  // update the memory
  o1[i] = act;
  o2[i] = b;
}
```
## Compilation and Execution
Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the second part executes the message for training or inference.
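As a rough sketch of this describe-then-execute split, assuming toy stand-ins (`ProgramDesc`, `Executor`, and the tiny op registry below are illustrative, not PaddlePaddle's actual classes):

```python
# A toy sketch of the two-part workflow: ProgramDesc stands in for the
# protobuf message, Executor for the runtime that interprets it.

class ProgramDesc:
    """A serializable description of the network: an ordered list of ops."""
    def __init__(self):
        self.ops = []

    def append_op(self, op_type, inputs, outputs):
        self.ops.append({"type": op_type, "inputs": inputs, "outputs": outputs})

class Executor:
    """Runs a ProgramDesc against a scope mapping variable names to values."""
    def __init__(self, kernels):
        self.kernels = kernels  # op type -> Python callable

    def run(self, program, scope):
        for op in program.ops:
            args = [scope[name] for name in op["inputs"]]
            scope[op["outputs"][0]] = self.kernels[op["type"]](*args)
        return scope

# Part 1: describe the network (what would be serialized as protobuf).
prog = ProgramDesc()
prog.append_op("add", ["x", "y"], ["z"])
prog.append_op("scale", ["z"], ["out"])

# Part 2: execute the message for inference.
exe = Executor({"add": lambda a, b: a + b, "scale": lambda a: 2 * a})
scope = exe.run(prog, {"x": 1.0, "y": 2.0})
print(scope["out"])  # 6.0
```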
@@ -210,11 +210,11 @@ a = pd.Variable(shape=[20, 20])
b = pd.fc(a, params=["fc.w", "fc.b"])
rnn = pd.create_rnn()
with rnn.stepnet():
    x = a.as_step_input()
    # reuse fc's parameter
    fc_without_b = pd.get_variable("fc.w")
    rnn.output(fc_without_b)
out = rnn()
```