Commit d80b780f authored by sahduashufa

Fully connected layer API

Parent 8b7d0b51
Cannot preview this file type.
......@@ -10,9 +10,9 @@
> Project start date: 2019/10/01
> Current total lines of code: 709
> Current total lines of code: 810
>
> Tests: main.cpp | nerual_network.cpp (added a BP backpropagation matrix-differentiation test demo)
> Tests: main.cpp | nerual_network.cpp | added the fully connected network architecture (with forward- and backward-propagation test demos)
>
> Test environment:
>
......@@ -26,36 +26,144 @@
> Thread model: posix
>
> Implemented:
>
——— 2019.11.28
## Installation and Build
Update: implemented the matrix encapsulation and automatic differentiation interfaces.
How to install and run the Edge-based test demos:
`git clone git@github.com:AllenZYJ/Edge-Computing-Engine.git`
`cd` into the cloned root directory, then run:
```
make
make install
```
Compile the demo entry program:
`g++ main.cpp -o main`
```shell
➜ edge-computing-engine git:(master) ✗ g++ main.cpp -o ma -lautodiff
```
Or compile the BP test program:
`g++ nerual_network.cpp -o ma`
```shell
➜ edge-computing-engine git:(master) ✗ g++ nerual_network.cpp -o ma
```
Run:
`./main`
```shell
➜ edge-computing-engine git:(master) ✗ ./main
```
or `./ma`
## The new demo implements a 5-layer fully connected network with customizable neurons, activation functions, and loss function
BP backpropagation is tested against an implementation modeled on the official PyTorch example code.
Code:
```cpp
Matrix data_mine = CreateRandMat(2,1);
Matrix label = CreateMatrix(2,1);
Matrix weight1 = CreateRandMat(2,2);
Matrix weight2 = CreateRandMat(2,2);
Matrix weight3 = CreateRandMat(2,2);
Matrix weight4 = CreateRandMat(2,2);
for(int epoch = 0; epoch < 20; epoch++)
{
    cout_mat(weight1);
    edge_network sequaltial(2, 2);
    Matrix output1 = sequaltial.forward(data_mine, weight1);
    Matrix output2 = sequaltial.forward(output1, weight2);
    Matrix output3 = sequaltial.forward(output2, weight3);
    Matrix output4 = sequaltial.forward(output3, weight4);
    Matrix output_end = sequaltial.end_layer_backward(label, output4);
    // backpropagate layer by layer (chain rule)
    Matrix backward1 = sequaltial.backward(output_end, output3, weight4);
    Matrix grad_w1w2 = mul_simple(backward1, data_mine);
    Matrix backward2 = sequaltial.backward(backward1, output2, weight3);
    Matrix grad_w3w4 = mul_simple(backward2, data_mine);
    Matrix backward3 = sequaltial.backward(backward2, output1, weight2);
    Matrix grad_w5w6 = mul_simple(backward3, data_mine);
    Matrix backward4 = sequaltial.backward(backward3, output4, weight1);
    Matrix grad_w7w8 = mul_simple(backward4, data_mine);
    weight1 = subtract(weight1, times_mat(0.0001, padding(grad_w1w2, 2, 2)));
    weight2 = subtract(weight2, times_mat(0.0001, padding(grad_w3w4, 2, 2)));
    weight3 = subtract(weight3, times_mat(0.0001, padding(grad_w5w6, 2, 2)));
    weight4 = subtract(weight4, times_mat(0.0001, padding(grad_w7w8, 2, 2)));
}
```
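For reference, here is a sketch of the math this loop implements, read off from `end_layer_backward`, `backward`, and the update lines. The layer superscripts are our notation, not names from the code; note that `backward` multiplies by the previous layer's activation rather than its derivative.

```latex
% output-layer delta (end_layer_backward): elementwise product of
% the loss gradient and the activation gradient
\delta^{(4)} = \frac{\partial L}{\partial a} \odot \frac{\partial \sigma}{\partial z}
% hidden-layer delta (backward): gradient pulled back through W,
% then multiplied elementwise by that layer's activation
\delta^{(k)} = \bigl(W^{(k+1)\top}\,\delta^{(k+1)}\bigr) \odot a^{(k)}
% SGD update with learning rate 1e-4 (the subtract/times_mat lines)
W^{(k)} \leftarrow W^{(k)} - 10^{-4}\,\nabla_{W^{(k)}} L
```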
Final iteration results:
```
---------epoch: 0------------
loss: 4.65667
loss: 3.28273
---------epoch: 1------------
loss: 4.65655
loss: 3.28265
---------epoch: 2------------
loss: 4.65643
loss: 3.28257
---------epoch: 3------------
loss: 4.65631
loss: 3.28249
---------epoch: 4------------
loss: 4.65619
loss: 3.2824
---------epoch: 5------------
loss: 4.65607
loss: 3.28232
---------epoch: 6------------
loss: 4.65596
loss: 3.28224
---------epoch: 7------------
loss: 4.65584
loss: 3.28216
---------epoch: 8------------
loss: 4.65572
loss: 3.28208
---------epoch: 9------------
loss: 4.6556
loss: 3.282
---------epoch: 10------------
loss: 4.65548
loss: 3.28192
---------epoch: 11------------
loss: 4.65536
loss: 3.28184
---------epoch: 12------------
loss: 4.65524
loss: 3.28176
---------epoch: 13------------
loss: 4.65512
loss: 3.28168
---------epoch: 14------------
loss: 4.65501
loss: 3.2816
---------epoch: 15------------
loss: 4.65489
loss: 3.28152
---------epoch: 16------------
loss: 4.65477
loss: 3.28144
---------epoch: 17------------
loss: 4.65465
loss: 3.28136
---------epoch: 18------------
loss: 4.65453
loss: 3.28128
---------epoch: 19------------
loss: 4.65441
loss: 3.2812
```
## BP backpropagation demo, tested against an implementation modeled on the official PyTorch example code
Iteration results:
W1: 0.6944 1.52368
-1.46644 -0.154097
......@@ -76,10 +184,17 @@ epoch: 100 error: 6.05895
0.009167 (sum of loss)
### Currently implemented interfaces (Matrix API)
- [x] `Matrix read_csv(string &file_path)`: reads a formatted (CSV) file and returns a matrix whose dimensions are computed automatically.
- [x] Formatted file-writing interface, comparable to pandas.to_csv.
- [x] Matrix broadcasting, exposed through the padding interface.
- [x] Forward- and backward-propagation interfaces for fully connected layers, with automatic differentiation support.
- [x] Matrix differentiation and automatic differentiation interface encapsulation.
- [x] `int save_txt(Matrix mid1, string path = "./", string delimiter = ",", string header = "./")`: file-stream interface for reading file headers and writing formatted files; supports writing matrix data with a custom header, output path, and delimiter (default ","). A usage sketch of these interfaces follows below.
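A minimal usage sketch of the interfaces above, assuming the repo headers the demos include; the CSV path, output filename, and header string here are placeholders:

```cpp
#include <iostream>
#include "./matrix/matrix_def.h"
#include "./matrix/matrix_pro.h"
#include "./file_pro/data_read.h"
using namespace std;

int main()
{
    string path = "./data/nerual_data.csv";      // placeholder input file
    Matrix data = read_csv(path);                // dimensions inferred from the file
    Matrix col  = iloc(data, 0, data.row, 0, 1); // slice out the first column
    Matrix wide = padding(col, data.row, 2);     // broadcast the column to two columns
    cout_mat(wide);
    save_txt(wide, "out.csv", ",", "c1,c2");     // custom delimiter and header
    return 0;
}
```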
......@@ -348,6 +463,7 @@ Something :
> 4. The API is designed to follow pandas and numpy usage conventions more closely.
> 5. For more details, see the latest code.
> 6. Stars and follows are welcome.
> 7. Thanks to the blogger Omar for the inspiration behind the autodiff part.
>
------
......
......@@ -18,48 +18,61 @@ Node mean_square_error(Matrix mid1,Matrix mid2){
}
return loss;
}
struct edge_layer
struct edge_network
{
Matrix weights;
Matrix bias;
int input_shape;
int output_shape;
edge_layer(int input, int output,Matrix weights,Matrix label,string activation){
this->input_shape = input;
this->output_shape = output;
this->weights = weights;
edge_network(int input, int output){
}
Matrix forward(Matrix data,Matrix weights)
{
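// forward pass: output = e_sigmoid(mul(weights, data))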
cout<<"data: ";
cout_mat(data);
cout<<"weights: "<<endl;
cout_mat(weights);
// cout<<"data: ";
// cout_mat(data);
// cout<<"weights: "<<endl;
// cout_mat(weights);
Matrix output = mul(weights,data);
// output = e_sigmoid(output);
cout<<"forward output: ";
cout_mat(output);
output = e_sigmoid(output);
// cout<<"forward output: ";
// cout_mat(output);
// cout<<"----------forward finish--------"<<endl;
return output;
}
Matrix backward(Matrix output,Matrix label,const float& learning_rate,Matrix grad_before)
Matrix backward(Matrix grad_next, Matrix output_before,Matrix weights)
{
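// pulls the gradient back one layer: mul_simple(mul(get_T(weights), grad_next), output_before)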
Matrix grad = CreateMatrix(weights.row,weights.col);
// cout<<"-----------backward-------------"<<endl;
// cout_mat(grad_next);
// cout<<"-----------grad_next------------"<<endl;
// cout_mat(output_before);
// cout<<"-----------output before--------"<<endl;
// cout_mat(get_T(weights));
// cout<<"-----------TTTTTweights--------------"<<endl;
// cout_mat(padding(mul_simple(mul(get_T(weights),grad_next),output_before),2,2));
// cout<<"[[[[[";
return mul_simple(mul(get_T(weights),grad_next),output_before);
for(int index_x=0;index_x<weights.row;index_x++)
{
Node target=label.matrix[index_x][0],_y_ = output.matrix[index_x][0];
cout<<"target: "<<target<<endl;
Node loss = target-pow(_y_,2);
cout<<"loss: "<<loss<<endl;
for(int index_y = 0;index_y<weights.col;index_y++)
{
grad.matrix[index_x][index_y] = loss.gradient(_y_);
weights.matrix[index_x][index_y]-=loss.gradient(_y_);
}
}
grad = mul_simple(grad,grad_before);
cout<<"grad:";
cout_mat(grad);
}
Matrix end_layer_backward(Matrix label,Matrix acti_val)
{
Matrix loss_act = CreateMatrix(acti_val.row,acti_val.col);
Matrix act_output = CreateMatrix(acti_val.row,acti_val.col);
int index_x,index_y;
for(index_x=0;index_x<loss_act.row;index_x++)
{
Node t1 = label.matrix[index_x][0],z31 =acti_val.matrix[index_x][0];
Node a13 = 1+exp(z31);
Node loss = 0.5*(pow((t1-a13),2));
Node act = 1/(1+exp(z31));
act_output.matrix[index_x][0] = act.gradient(z31);
loss_act.matrix[index_x][0] = loss.gradient(a13);
// cout<<"z31: "<<z31<<endl;
// cout<<"z31_grad: "<<act.gradient(z31)<<endl;
// cout<<"a13: "<<a13<<"t1: "<<t1<<endl;
cout<<"loss: "<<loss<<endl;
// cout<<"gradient: "<<loss.gradient(a13)<<endl;
}
Matrix mid_grad_end = mul_simple(loss_act,act_output);
// cout<<"[[[[[[[[";
// cout_mat(mid_grad_end);
return mid_grad_end;
}
};
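For clarity, this is what `end_layer_backward` computes per output element, read directly from its autodiff expressions (note that the code's `a13 = 1 + exp(z31)` is not the sigmoid it is paired with):

```latex
a = 1 + e^{z}, \qquad L = \tfrac{1}{2}(t - a)^{2}, \qquad s = \frac{1}{1 + e^{z}}
\frac{\partial L}{\partial a} = a - t, \qquad
\frac{\partial s}{\partial z} = -\frac{e^{z}}{(1 + e^{z})^{2}}
\text{returned value: } \delta = \frac{\partial L}{\partial a} \cdot \frac{\partial s}{\partial z}
```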
Cannot preview this file type.
......@@ -45,31 +45,37 @@ int main()
}
stop = clock();
printf("%f\n", (double)(stop - start) / CLOCKS_PER_SEC);
Node u=2, p=3.2, tt = 10, ll = u*p;
Node f = 1/(1+exp(ll));
std::cout << "f = 1/(1+exp(u*p)) = " << f << std::endl;
std::cout << "∂f/∂ll = " << f.gradient(ll) << std::endl;
std::cout << "∂f/∂p = " << f.gradient(p) << std::endl;
cout<<"------------autodiff for neural network-----------"<<endl;
Matrix data1 = CreateRandMat(2,1);
Matrix label_data = CreateMatrix(2,1);
change_va(label_data,0,0,1);
change_va(label_data,1,0,1);
cout_mat(data1);
cout<<"------------data1----------------"<<endl;
Matrix we1 = CreateRandMat(2,2);
cout_mat(we1);
cout<<"------------weight-----------"<<endl;
edge_layer layer(2,2,we1,label_data,"tanh");
Matrix grad_before1 = ones(2,2);
Matrix out_put = layer.forward(data1,we1);
Matrix grad_before2 = layer.backward(out_put,label_data,0.1,grad_before1);
Matrix grad_we1 = CreateRandMat(2,2);
we1 = subtract(we1,grad_we1);
cout<<"------------first backward---------"<<endl;
edge_layer layer_2(layer.output_shape,2,we1,label_data,"tanh");
Matrix out_put2 = layer_2.forward(out_put,we1);
layer_2.backward(out_put2,label_data,0.1,grad_before2);
Matrix data_mine = CreateRandMat(2,1);
Matrix label = CreateMatrix(2,1);
Matrix weight1 = CreateRandMat(2,2);
Matrix weight2 = CreateRandMat(2,2);
Matrix weight3 = CreateRandMat(2,2);
Matrix weight4 = CreateRandMat(2,2);
for(int epoch = 0; epoch < 20; epoch++)
{
    cout << "---------epoch: " << epoch << "------------" << endl;
    // cout_mat(weight1);
    edge_network sequaltial(2, 2);
    Matrix output1 = sequaltial.forward(data_mine, weight1);
    Matrix output2 = sequaltial.forward(output1, weight2);
    Matrix output3 = sequaltial.forward(output2, weight3);
    Matrix output4 = sequaltial.forward(output3, weight4);
    Matrix output_end = sequaltial.end_layer_backward(label, output4);
    // backpropagate layer by layer (chain rule)
    Matrix backward1 = sequaltial.backward(output_end, output3, weight4);
    Matrix grad_w1w2 = mul_simple(backward1, data_mine);
    Matrix backward2 = sequaltial.backward(backward1, output2, weight3);
    Matrix grad_w3w4 = mul_simple(backward2, data_mine);
    Matrix backward3 = sequaltial.backward(backward2, output1, weight2);
    Matrix grad_w5w6 = mul_simple(backward3, data_mine);
    Matrix backward4 = sequaltial.backward(backward3, output4, weight1);
    Matrix grad_w7w8 = mul_simple(backward4, data_mine);
    weight1 = subtract(weight1, times_mat(0.0001, padding(grad_w1w2, 2, 2)));
    weight2 = subtract(weight2, times_mat(0.0001, padding(grad_w3w4, 2, 2)));
    weight3 = subtract(weight3, times_mat(0.0001, padding(grad_w5w6, 2, 2)));
    weight4 = subtract(weight4, times_mat(0.0001, padding(grad_w7w8, 2, 2)));
}
return 0;
}
......@@ -227,7 +227,7 @@ void cout_mat(Matrix mid1)
{
for(int index_y=0;index_y<mid1.col;index_y++)
{
cout<<mid1.matrix[index_x][index_y]<<" ";
cout<<mid1.matrix[index_x][index_y]<<",";
}
cout<<endl;
}
......@@ -320,4 +320,16 @@ Matrix mat_sq_loss(Matrix mid1,Matrix mid2)
}
return mat_sq;
}
Matrix padding(Matrix mid1,int shape1,int shape2)
{
Matrix result = CreateMatrix(shape1,shape2);
for(int index_x = 0;index_x<shape1;index_x++)
{
for(int index_y = 0;index_y<shape2;index_y++)
{
result.matrix[index_x][index_y] = mid1.matrix[index_x][0];
}
}
return result;
}
#endif
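A quick sketch of `padding`'s broadcast behavior: it replicates column 0 of its input across the requested shape. A minimal check, assuming the `CreateMatrix`, `change_va`, and `cout_mat` helpers used elsewhere in this repo:

```cpp
#include "./matrix/matrix_def.h"
#include "./matrix/matrix_pro.h"

int main()
{
    Matrix v = CreateMatrix(2, 1);  // 2x1 column vector
    change_va(v, 0, 0, 1);          // v = [1; 2]
    change_va(v, 1, 0, 2);
    Matrix m = padding(v, 2, 3);    // 2x3: each row repeats its first entry
    cout_mat(m);                    // prints 1,1,1 / 2,2,2
    return 0;
}
```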
#include<iostream>
#include<ctime>
#include<string>
#include<time.h>
#include<math.h>
#include<fstream>
#include<stdlib.h>
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
using namespace std;
clock_t start, stop;
double duration;
int main()
{
    welcome();
    string path = "./data/nerual_data.csv";
    ofstream fout("mytest.txt");
    Matrix data = read_csv(path);
    Matrix bais = CreateMatrix(data.row, 1);
    Matrix x = iloc(data, 0, 100, 0, 2);
    Matrix y = iloc(data, 0, 100, 2, 3);
    int N = 100, in_Dim = 2, H_num = 2, out_Dim = 2;
    double learning_rate = 0.0001;
    Matrix W1 = CreateRandMat(in_Dim, H_num);
    Matrix W2 = CreateRandMat(H_num, out_Dim);
    cout_mat(W1);
    cout_mat(W2);
    for(int epoch = 0; epoch < 500; epoch++)
    {
        Matrix x_w1 = mul(x, W1);
        Matrix re = mat_relu(x_w1);
        Matrix out = mul(re, W2);
        Matrix mat_sq = mat_sq_loss(out, y);
        Matrix grad_y_pred = times_mat(2.0, subtract(out, y));
        Matrix grad_w2 = mul(get_T(re), grad_y_pred);
        Matrix grad_h_relu = mul(grad_y_pred, get_T(W2));
        Matrix grad_h_relu_copy = mat_relu(grad_h_relu);
        Matrix grad_w1 = mul(get_T(x), grad_h_relu_copy);
        Matrix dw1 = times_mat(learning_rate, mul(get_T(x), grad_h_relu_copy));
        W1 = subtract(W1, dw1);
        W2 = subtract(W2, times_mat(learning_rate, grad_w2));
        cout << "W1: ";
        cout_mat(W1);
        cout << "W2: ";
        cout_mat(W2);
        cout << "loss" << ": ";
        cout << matrix_sum(mat_sq) / 100 << endl;
        save_txt(W1, "result.csv", ",", "header");
    }
}
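The gradients this loop computes follow the two-layer ReLU network from the official PyTorch example the README cites; in our notation (with one caveat: the code approximates the ReLU mask by applying `mat_relu` to the backpropagated gradient itself rather than masking by h > 0):

```latex
h = x W_1, \quad h_r = \mathrm{ReLU}(h), \quad \hat{y} = h_r W_2, \quad
L = \lVert \hat{y} - y \rVert_2^{2}
\frac{\partial L}{\partial \hat{y}} = 2(\hat{y} - y), \qquad
\nabla_{W_2} L = h_r^{\top} \frac{\partial L}{\partial \hat{y}}, \qquad
\nabla_{W_1} L = x^{\top}\!\Bigl(\frac{\partial L}{\partial \hat{y}} W_2^{\top} \odot \mathbf{1}[h > 0]\Bigr)
```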
File deleted
File deleted
#include<iostream>
#include<ctime>
#include<string>
#include <time.h>
#include <math.h>
#include <fstream>
#include"./autodiff/node.h"
#include"./matrix/matrix_def.h"
#include"./matrix/matrix_pro.h"
#include"./welcome/score_wel.cpp"
#include"./logistic/logistic_def.h"
#include"./file_pro/data_read.h"
using namespace std;
int main()
{
    Node u = 2, p = 3, tt = 10;
    Node f = u*p*p + exp(tt) + cos(u) + sin(p);
    std::cout << "∂f/∂tt = " << f.gradient(tt) << std::endl;
    std::cout << "∂f/∂p = " << f.gradient(p) << std::endl;
}
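Hand-checking the demo against the definition of f (with u = 2, p = 3, tt = 10):

```latex
f = u p^{2} + e^{tt} + \cos u + \sin p
\frac{\partial f}{\partial tt} = e^{tt} = e^{10} \approx 22026.47, \qquad
\frac{\partial f}{\partial p} = 2 u p + \cos p = 12 + \cos 3 \approx 11.01
```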