Status: Closed
Opened Aug 07, 2020 by saxon_zh (Guest)

[Paper reproduction] BigGAN SA module: error in grad

Created by: yxhpy

import numpy as np

import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from paddle.fluid import layers

# Thin layer wrapper around layers.softmax so the axis can be fixed at construction time.
class SoftMax(dg.Layer):
    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs

    def forward(self, x):
        return layers.softmax(x, **self.kwargs)
# Wraps a layer and applies spectral normalization to one of its weights.
# dg.SpectralNorm only normalizes a tensor; this wrapper takes ownership of the
# module's weight, re-normalizes it on every forward, and writes the normalized
# tensor back onto the module before calling it.
class SpectralNorm(dg.SpectralNorm):
    def __init__(self, module, weight_name='weight', power_iterations=1, **kwargs):
        weight_shape = getattr(module, weight_name).shape
        if 'dim' not in kwargs:
            # Normalize over the output-channel axis (0) for conv layers, else axis 1.
            if isinstance(module, (  # dg.Conv1D, dg.Conv1DTranspose,
                                   dg.Conv2D, dg.Conv2DTranspose,
                                   dg.Conv3D, dg.Conv3DTranspose)):
                kwargs['dim'] = 0
            else:
                kwargs['dim'] = 1
        kwargs['power_iters'] = power_iterations
        kwargs.pop('weight_shape', None)
        super().__init__(weight_shape, **kwargs)
        self.weight = getattr(module, weight_name)

        # Detach the raw weight from the wrapped module so only this wrapper owns it.
        del module._parameters[weight_name]
        self.module = module
        self.weight_name = weight_name

    def forward(self, *args, **kwargs):
        # Re-normalize, write the result back as a plain attribute, then run the module.
        weight_norm = super().forward(self.weight)
        setattr(self.module, self.weight_name, weight_norm)
        return self.module(*args, **kwargs)
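For reference, this is how the wrapper above is meant to be used: a minimal sketch, assuming the imports at the top of this report (the layer sizes are illustrative, not from the original code):

with fluid.dygraph.guard():
    conv = SpectralNorm(dg.Conv2D(64, 32, 1, bias_attr=False))
    feat = dg.to_variable(np.random.rand(4, 64, 8, 8).astype('float32'))
    out = conv(feat)    # the wrapped weight is re-normalized on every call
    print(out.shape)    # [4, 32, 8, 8]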
# BigGAN-style self-attention block (SAGAN attention with max-pooled keys/values).
class SelfAttention(dg.Layer):
    def __init__(self, in_dim, activation=layers.relu):
        super().__init__()
        self.channel_in = in_dim
        self.activation = activation

        # 1x1 convs for query (theta), key (phi), value (g) and output projection,
        # each wrapped with spectral normalization.
        self.theta = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
        self.phi = SpectralNorm(dg.Conv2D(in_dim, in_dim // 8, 1, bias_attr=False))
        self.pool = dg.Pool2D(2, 'max', 2)
        self.g = SpectralNorm(dg.Conv2D(in_dim, in_dim // 2, 1, bias_attr=False))
        self.o_conv = SpectralNorm(dg.Conv2D(in_dim // 2, in_dim, 1, bias_attr=False))
        # Learnable residual gate, initialized to 0 so the block starts as identity.
        self.gamma = self.create_parameter(
            [1], default_initializer=fluid.initializer.Constant(0.0))

        self.softmax = SoftMax(axis=-1)

    def forward(self, x):
        # NCHW input; 'width'/'height' keep the original naming and cancel out
        # in the reshapes below.
        m_batchsize, C, width, height = x.shape
        N = height * width

        theta = self.theta(x)                                   # query: (B, C/8, H, W)
        phi = self.phi(x)
        phi = self.pool(phi)                                    # key, 2x2 max-pooled
        phi = layers.reshape(phi, (m_batchsize, -1, N // 4))    # (B, C/8, N/4)
        theta = layers.reshape(theta, (m_batchsize, -1, N))
        theta = layers.transpose(theta, (0, 2, 1))              # (B, N, C/8)
        attention = self.softmax(layers.bmm(theta, phi))        # (B, N, N/4)
        g = layers.reshape(self.pool(self.g(x)),
                           (m_batchsize, -1, N // 4))           # value: (B, C/2, N/4)
        attn_g = layers.reshape(
            layers.bmm(g, layers.transpose(attention, (0, 2, 1))),
            (m_batchsize, -1, width, height))                   # (B, C/2, H, W)
        out = self.o_conv(attn_g)
        return self.gamma * out + x                             # gated residual connection
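Note that the forward pass of this block completes without error; only the gradient call in the repro below fails. A quick shape check, as a minimal sketch assuming the classes above (the batch size is illustrative):

with fluid.dygraph.guard():
    sa = SelfAttention(in_dim=1024)
    x_np = np.random.rand(2, 1024, 28, 28).astype('float32')
    y = sa(dg.to_variable(x_np))
    print(y.shape)    # [2, 1024, 28, 28], same as the input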
# Repro: the forward pass succeeds, but fluid.dygraph.grad fails on this graph.
with fluid.dygraph.guard():
    x = fluid.layers.uniform_random(shape=(10, 1024, 28, 28))
    x = fluid.dygraph.to_variable(x)
    x.stop_gradient = False
    y = SelfAttention(in_dim=1024)(x)
    grads = fluid.dygraph.grad(y, [x])[0]
    print(grads)

---------------------------------------------------------------------------
EnforceNotMet                             Traceback (most recent call last)
<ipython-input-6-eb997cf843ba> in <module>
      4     x.stop_gradient=False
      5     y = SelfAttention(in_dim=1024)(x)
----> 6     grads = fluid.dygraph.grad(y, [x])[0]
      7     print(grads)
</opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-153> in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused, no_grad_vars, backward_strategy)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/wrapped_decorator.py in __impl__(func, *args, **kwargs)
     23     def __impl__(func, *args, **kwargs):
     24         wrapped_func = decorator_func(func)
---> 25         return wrapped_func(*args, **kwargs)
     26 
     27     return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in __impl__(*args, **kwargs)
    214         assert in_dygraph_mode(
    215         ), "We Only support %s in imperative mode, please use fluid.dygraph.guard() as context to run it in imperative Mode" % func.__name__
--> 216         return func(*args, **kwargs)
    217 
    218     return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused, no_grad_vars, backward_strategy)
    487     return core.dygraph_partial_grad(
    488         inputs, outputs, grad_outputs, no_grad_vars, place, backward_strategy,
--> 489         create_graph, retain_graph, allow_unused, only_inputs)
    490 
    491 
EnforceNotMet: 

--------------------------------------------
C++ Call Stacks (More useful to developers):
--------------------------------------------
0   std::string paddle::platform::GetTraceBackString<std::string const&>(std::string const&, char const*, int)
1   paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const*, int)
2   paddle::imperative::ReadyGradVarInfoMap::GetTarget(paddle::imperative::VariableWrapper const*) const
3   paddle::imperative::PartialGradTask::CreateResult()
4   paddle::imperative::PartialGradTask::Run()
5   paddle::imperative::PartialGradEngine::Execute()

----------------------
Error Message Summary:
----------------------
PermissionDeniedError: Target var tmp_0@GRAD should not be nullptr
  [Hint: iter->second should not be null.] at (/paddle/paddle/fluid/imperative/partial_grad_engine.cc:501)
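Not part of the original report, but possibly useful context: every failing frame in the C++ stack sits in the partial-grad engine behind fluid.dygraph.grad (PartialGradTask / PartialGradEngine). As an unverified workaround sketch, a full backward pass over the same graph avoids that engine entirely (reduce_sum is used here only to get a scalar target; the choice is illustrative):

with fluid.dygraph.guard():
    x = dg.to_variable(np.random.rand(10, 1024, 28, 28).astype('float32'))
    x.stop_gradient = False
    y = SelfAttention(in_dim=1024)(x)
    loss = layers.reduce_sum(y)   # scalar target so backward() is well-defined
    loss.backward()               # full backward instead of dygraph.grad
    print(x.gradient())           # d(loss)/dx as a numpy array, if backward succeeds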
Reference: paddlepaddle/models#4794