diff --git a/GCN-PyTorch-master/layer.py b/GCN-PyTorch-master/layer.py
index 9a3bc2df56be153cda29aa0e6a4667e41b08b19e..cff29c2cf4bb3d1319913de9e2a4db5149df1fca 100644
--- a/GCN-PyTorch-master/layer.py
+++ b/GCN-PyTorch-master/layer.py
@@ -24,34 +24,36 @@ class GraphConvolution(nn.Module):
         self.featureless = featureless  # if there are no input features, the layer is "featureless"
         self.num_features_nonzero = num_features_nonzero  # number of non-zero entries when the input is sparse
 
-        self.weight = nn.Parameter(torch.randn(input_dim, output_dim))  # learnable parameter, the W in the formula mentioned next to bias above
-        self.bias = None  # bias is forced to None here by default
+        self.weight = nn.Parameter(torch.randn(input_dim, output_dim))  # learnable parameter, the W in the formula mentioned next to bias above; randomly initialized
+        self.bias = None  # bias is forced to None here by default
         if bias:
-            self.bias = nn.Parameter(torch.zeros(output_dim))  # gives bias a learnable parameter
+            self.bias = nn.Parameter(torch.zeros(output_dim))  # gives bias a learnable parameter
 
 
     def forward(self, inputs):
-        # print('inputs:', inputs)
-        x, support = inputs
+        # print('inputs:', inputs),
+        # the input here is the (feature, adjacency) pair; the forward pass is built from it
+        x, support = inputs  # unpack; support is the normalized adjacency D^-0.5 A D^-0.5
 
+        # in training mode, sparse and dense inputs use two different dropout methods
         if self.training and self.is_sparse_inputs:
             x = sparse_dropout(x, self.dropout, self.num_features_nonzero)
         elif self.training:
             x = F.dropout(x, self.dropout)
 
-        # convolve
+        # convolve; check whether there are input features
         if not self.featureless:  # if it has features x
             if self.is_sparse_inputs:
-                xw = torch.sparse.mm(x, self.weight)
+                xw = torch.sparse.mm(x, self.weight)  # multiply x by the weight
             else:
                 xw = torch.mm(x, self.weight)
         else:
             xw = self.weight
 
-        out = torch.sparse.mm(support, xw)
+        out = torch.sparse.mm(support, xw)  # multiply support by xw
 
         if self.bias is not None:
-            out += self.bias
+            out += self.bias  # add the bias
 
-        return self.activation(out), support
+        return self.activation(out), support  # looks odd at first, but returning support as well lets the next layer in nn.Sequential reuse it
diff --git a/GCN-PyTorch-master/model.py b/GCN-PyTorch-master/model.py
index 8f7741a3536b278791327fe70ca598c2a05de05a..ead5eb7784ae7856494ff8e4e4c49b9d60312676 100644
--- a/GCN-PyTorch-master/model.py
+++ b/GCN-PyTorch-master/model.py
@@ -6,7 +6,7 @@ from layer import GraphConvolution
 from config import args
 
 class GCN(nn.Module):
-
+    # https://www.educba.com/torch-dot-nn-module/
     def __init__(self, input_dim, output_dim, num_features_nonzero):
         super(GCN, self).__init__()
 
@@ -21,31 +21,38 @@ class GCN(nn.Module):
         print('output dim:', output_dim)
         print('num_features_nonzero:', num_features_nonzero)
 
-
+        # what is defined here is actually two layers: a hidden layer is inserted between input and output,
+        # with the dimensions matched up so they can be chained; this only defines each layer's own behaviour,
+        # the connection between the layers is not made yet
+        # https://blog.csdn.net/dss_dssssd/article/details/82980222, Sequential is like putting the layers into one container,
+        # and what is instantiated here is the class itself; the chaining is handled by nn.Sequential, which will call each layer's forward in turn,
+        # so forward has not actually been called at this point?
         self.layers = nn.Sequential(GraphConvolution(self.input_dim, args.hidden, num_features_nonzero,
                                                      activation=F.relu,
                                                      dropout=args.dropout,
-                                                     is_sparse_inputs=True),
+                                                     is_sparse_inputs=True),  # input -> hidden layer
                                     GraphConvolution(args.hidden, output_dim, num_features_nonzero,
                                                      activation=F.relu,
                                                      dropout=args.dropout,
-                                                     is_sparse_inputs=False),
-
+                                                     is_sparse_inputs=False),  # hidden layer -> output
                                     )
 
-    def forward(self, inputs):
+
+    def forward(self, inputs):
+        # this is where the layers' forward functions get called; revisit that forward later, it looks a bit odd at first
         x, support = inputs
 
-        x = self.layers((x, support))
+        x = self.layers((x, support))  # calls forward in layer.py; Sequential exposes a single callable, so it can be used directly,
+        # a tuple is passed straight in: x is the feature information, support is the adjacency (connectivity) information
 
         return x
 
     def l2_loss(self):
-
+        # L2-style loss term: computed from the l2 norm (sum of squares), which gives a stable, unique solution
+        # https://zhuanlan.zhihu.com/p/52203156, the `layer` below extracts one layer from the nn.Module
         layer = self.layers.children()
         layer = next(iter(layer))
-
+        # TODO: look into Python iterators tomorrow
         loss = None
 
         for p in layer.parameters():
diff --git a/GCN-PyTorch-master/train.py b/GCN-PyTorch-master/train.py
index e93b8b17b8bc3e1875667a9556d8dcaa8e11795f..0e53ecd6b50be1fa4aa9eee8d9d0d84de4d39a27 100644
--- a/GCN-PyTorch-master/train.py
+++ b/GCN-PyTorch-master/train.py
@@ -69,7 +69,8 @@ feature = torch.sparse.FloatTensor(i.t(), v, features[2]).to(device)
 supports = preprocess_adj(adj)  # turns A into the normalized form; the return value is the coefficient matrix D^-0.5 A D^-0.5
 i = torch.from_numpy(supports[0]).long().to(device)
 v = torch.from_numpy(supports[1]).to(device)
-support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)
+support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)  # support is the usual (normalized) version of the adjacency matrix
+
 
 print('x :', feature)
 print('sp:', support)
@@ -82,11 +83,13 @@ feat_dim = feature.shape[1]  # the feature dimension of the dense matrix
 
 # start training the net
-net = GCN(feat_dim, num_classes, num_features_nonzero)
-net.to(device)
-optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)
+net = GCN(feat_dim, num_classes, num_features_nonzero)  # only the sizes are passed in: feature dim, number of classes, non-zero count
+# torch.nn.Parameter(data, requires_grad)
+net.to(device)  # CUDA
+optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)  # Adam optimizer over the net's parameters
 
-net.train()
+net.train()  # usually placed right before the training loop; it is required whenever Dropout or batch normalization is involved
+# https://blog.csdn.net/qq_46284579/article/details/120439049, without Dropout/BN these two calls make little difference either way; they are inherited from nn.Module, not defined in this class
 for epoch in range(args.epochs):
 
     out = net((feature, support))
@@ -104,7 +107,7 @@ for epoch in range(args.epochs):
 
     print(epoch, loss.item(), acc.item())
 
-net.eval()
+net.eval()  # usually placed before evaluation/testing; it is required whenever Dropout or batch normalization is involved
 out = net((feature, support))
 out = out[0]
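
Note (not part of the patch): the comments above keep circling around two points, so here is a minimal, self-contained sketch of both. It uses toy dimensions, dense tensors, and hypothetical names (`normalize_adj`, `ToyGraphConv`) rather than the repo's actual `preprocess_adj`/`GraphConvolution`, so treat it as an illustration of the pattern only: (1) the support matrix is the renormalized adjacency D^-0.5 (A + I) D^-0.5, and (2) each layer returns `(out, support)` so that `nn.Sequential` can thread the same support tensor into the next layer.

```python
import torch
from torch import nn
import torch.nn.functional as F


def normalize_adj(adj: torch.Tensor) -> torch.Tensor:
    """Toy dense version of the D^-0.5 (A + I) D^-0.5 renormalization."""
    a_hat = adj + torch.eye(adj.shape[0])          # add self-loops
    d_inv_sqrt = a_hat.sum(dim=1).pow(-0.5)        # D^-0.5 from the row degrees
    return d_inv_sqrt.unsqueeze(1) * a_hat * d_inv_sqrt.unsqueeze(0)


class ToyGraphConv(nn.Module):
    """Stand-in for GraphConvolution with the same (x, support) tuple interface."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_dim, out_dim))

    def forward(self, inputs):
        x, support = inputs
        out = support @ (x @ self.weight)           # support · X · W
        # Returning support alongside the output is what lets nn.Sequential
        # feed the unchanged adjacency into the next layer.
        return F.relu(out), support


layers = nn.Sequential(ToyGraphConv(4, 8), ToyGraphConv(8, 3))

raw = (torch.rand(5, 5) > 0.5).float()
adj = ((raw + raw.t()) > 0).float()                 # symmetric toy adjacency
support = normalize_adj(adj)
x = torch.randn(5, 4)                               # 5 nodes, 4 features each

out, _ = layers((x, support))                       # same call pattern as net((feature, support))
print(out.shape)                                    # torch.Size([5, 3])
```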