Unverified commit 7616d3bc authored by Double_V, committed by GitHub

Update API in VOT to fit release/1.8 (#4623)

* use fluid.data in place of fluid.layers.data
* delete create_var_list_v2 func
Parent 64eb26ae
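For context, the heart of this change is the migration from the 1.x-era fluid.layers.data to fluid.data. A minimal sketch of the difference, assuming a Paddle 1.8 environment (the variable names here are illustrative, not from the patch):

    import paddle.fluid as fluid

    # Old style: a -1 batch dimension is prepended to `shape` automatically
    # unless append_batch_size=False is passed.
    x_old = fluid.layers.data(name='x_old', shape=[3, 32, 32], dtype='float32')

    # release/1.8 style: the full shape, batch dimension included, is spelled
    # out; None marks a variable-sized dimension. fluid.data also validates
    # the shape and dtype of fed arrays, which fluid.layers.data did not.
    x_new = fluid.data(name='x_new', shape=[None, 3, 32, 32], dtype='float32')

One behavioral difference matters for the optimizer code below: fluid.data accepts no stop_gradient argument and its variables default to stop_gradient=True, which is why the new create_var_list helper resets stop_gradient to False after creating each placeholder.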
@@ -72,9 +72,9 @@ def Fconv2d(
     groups mismatch.
     Examples:
         .. code-block:: python
-            data = fluid.layers.data(name='data', shape=[3, 32, 32], \
+            data = fluid.data(name='data', shape=[3, 32, 32], \
                 dtype='float32')
-            filter = fluid.layers.data(name='filter',shape=[10,3,3,3], \
+            filter = fluid.data(name='filter',shape=[10,3,3,3], \
                 dtype='float32',append_batch_size=False)
             conv2d = fluid.layers.conv2d(input=data,
                                          filter=filter,
......
@@ -60,9 +60,9 @@ def Fconv2d(input,
     groups mismatch.
     Examples:
         .. code-block:: python
-            data = fluid.layers.data(name='data', shape=[3, 32, 32], \
+            data = fluid.data(name='data', shape=[3, 32, 32], \
                 dtype='float32')
-            filter = fluid.layers.data(name='filter',shape=[10,3,3,3], \
+            filter = fluid.data(name='filter',shape=[10,3,3,3], \
                 dtype='float32',append_batch_size=False)
             conv2d = fluid.layers.conv2d(input=data,
                                          filter=filter,
@@ -112,62 +112,4 @@ def Fconv2d(input,
     return pre_bias
-
-
-def test_conv2d_with_filter():
-    exemplar = np.random.random((8, 4, 6, 6)).astype(np.float32)
-    instance = np.random.random((8, 4, 22, 22)).astype(np.float32)
-    # fluid.layers.data(append_batch_size=)
-    use_gpu = False
-    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
-    train_program = fluid.Program()
-    start_program = fluid.Program()
-    with fluid.program_guard(train_program, start_program):
-        x = fluid.layers.data(
-            name="inst", shape=[8, 4, 22, 22], append_batch_size=False)
-        y = fluid.layers.data(
-            name="exem", shape=[8, 4, 6, 6], append_batch_size=False)
-        bias_att = fluid.ParamAttr(
-            name="bias_", initializer=fluid.initializer.ConstantInitializer(1.))
-        out = conv2d_with_filter(x, y, groups=1)
-        weight_att = fluid.ParamAttr(
-            name='weight',
-            initializer=fluid.initializer.NumpyArrayInitializer(exemplar))
-        bias_att = fluid.ParamAttr(
-            name="bias", initializer=fluid.initializer.ConstantInitializer(0.))
-        res = fluid.layers.conv2d(
-            x,
-            8,
-            6,
-            param_attr=weight_att,
-            bias_attr=bias_att,
-            stride=1,
-            padding=0,
-            dilation=1)
-    exe = fluid.Executor(place)
-    exe.run(program=fluid.default_startup_program())
-    print(out.shape)
-    compiled_prog = fluid.compiler.CompiledProgram(train_program)
-    out, res = exe.run(compiled_prog,
-                       feed={"inst": instance,
-                             "exem": exemplar},
-                       fetch_list=[out.name, res.name])
-    print(np.sum(out - res))
-    np.testing.assert_allclose(out, res, rtol=1e-5, atol=0)
-    with fluid.dygraph.guard():
-        exem = fluid.dygraph.to_variable(exemplar)
-        inst = fluid.dygraph.to_variable(instance)
-        out = conv2d_with_filter(inst, exem, groups=1)
-        print(np.sum(out.numpy() - res))
-        np.testing.assert_allclose(out.numpy(), res, rtol=1e-5, atol=0)
-
-
-if __name__ == '__main__':
-    test_conv2d_with_filter()
@@ -3,7 +3,7 @@ from paddle.fluid import layers
 from paddle import fluid
 from pytracking.libs.tensorlist import TensorList
 from pytracking.utils.plotting import plot_graph
-from pytracking.libs.paddle_utils import n2p, clone, static_clone
+from pytracking.libs.paddle_utils import n2p, clone, static_clone, create_var_list
class L2Problem:
@@ -243,20 +243,9 @@ class ConjugateGradient(ConjugateGradientBase):
         start_program = fluid.Program()
         with fluid.program_guard(train_program, start_program):
             scope = 'first/'
-            self.x_ph = TensorList([
-                fluid.layers.data(
-                    '{}x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
-            self.p_ph = TensorList([
-                fluid.layers.data(
-                    '{}p_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
+            self.x_ph = TensorList(create_var_list(scope+"x", self.x, None))
+            self.p_ph = TensorList(create_var_list(scope+"p", self.x, None))

             # problem forward
             self.f0 = self.problem(self.x_ph, scope)
@@ -277,20 +266,10 @@ class ConjugateGradient(ConjugateGradientBase):
         start_program2 = fluid.Program()
         with fluid.program_guard(train_program2, start_program2):
             scope = 'second/'
-            self.x_ph_2 = TensorList([
-                fluid.layers.data(
-                    '{}x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
-            self.dfdx_x_ph = TensorList([
-                fluid.layers.data(
-                    '{}dfdx_x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.g)
-            ])
+            self.x_ph_2 = TensorList(create_var_list(scope+"x", self.x, None))
+            self.dfdx_x_ph = TensorList(create_var_list(scope+"dfdx_x", self.g, None))

             self.f0_2 = self.problem(self.x_ph_2, scope)
             self.dfdx_dfdx = TensorList(
@@ -444,20 +423,9 @@ class GaussNewtonCG(ConjugateGradientBase):
         start_program = fluid.Program()
         with fluid.program_guard(train_program, start_program):
             scope = 'first/'
-            self.x_ph = TensorList([
-                fluid.layers.data(
-                    '{}x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
-            self.p_ph = TensorList([
-                fluid.layers.data(
-                    '{}p_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
+            self.x_ph = TensorList(create_var_list(scope+"x", self.x, None))
+            self.p_ph = TensorList(create_var_list(scope+"p", self.x, None))

             # problem forward
             self.f0 = self.problem(self.x_ph, scope)
@@ -477,20 +445,9 @@ class GaussNewtonCG(ConjugateGradientBase):
         start_program2 = fluid.Program()
         with fluid.program_guard(train_program2, start_program2):
             scope = 'second/'
-            self.x_ph_2 = TensorList([
-                fluid.layers.data(
-                    '{}x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
-            self.dfdx_x_ph = TensorList([
-                fluid.layers.data(
-                    '{}dfdx_x_{}'.format(scope, idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.g)
-            ])
+            self.x_ph_2 = TensorList(create_var_list(scope+"x", self.x, None))
+            self.dfdx_x_ph = TensorList(create_var_list(scope+"dfdx_x", self.g, None))

             self.f0_2 = self.problem(self.x_ph_2, scope)
             self.dfdx_dfdx = TensorList(
@@ -654,13 +611,7 @@ class GradientDescentL2:
         train_program = fluid.Program()
         start_program = fluid.Program()
         with fluid.program_guard(train_program, start_program):
-            self.x_ph = TensorList([
-                fluid.layers.data(
-                    'x_{}'.format(idx),
-                    v.shape,
-                    append_batch_size=False,
-                    stop_gradient=False) for idx, v in enumerate(self.x)
-            ])
+            self.x_ph = TensorList(create_var_list("x", self.x, None))

             # problem forward
             self.f0 = self.problem(self.x_ph)
......
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import dygraph
from paddle.fluid import layers
from paddle.fluid.framework import Variable
@@ -216,3 +217,17 @@ def dropout2d(input, prob, is_train=False):
     binary_tensor = layers.floor(random_tensor)
     output = input / keep_prob * binary_tensor
     return output
+
+
+def create_var_list(scope, var_lists, shape):
+    vars = []
+    for idx, v in enumerate(var_lists):
+        name = "{}_{}".format(scope, idx)
+        if shape is None:
+            var = fluid.data(name, shape=v.shape)
+        else:
+            var = fluid.data(name, shape=shape + list(v[0].shape))
+        var.stop_gradient = False
+        vars.append(var)
+    return vars
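A usage sketch of the new helper, mirroring the call sites above; the numpy arrays stand in for the optimizer state self.x and are purely illustrative:

    import numpy as np
    import paddle.fluid as fluid
    from pytracking.libs.tensorlist import TensorList
    from pytracking.libs.paddle_utils import create_var_list

    # Stand-ins for the variables held in self.x.
    x = [np.zeros([1, 16, 4, 4], 'float32'), np.zeros([1, 32, 2, 2], 'float32')]

    train_program = fluid.Program()
    start_program = fluid.Program()
    with fluid.program_guard(train_program, start_program):
        # shape=None: each placeholder takes the full shape of its array,
        # so first/x_0 has shape [1, 16, 4, 4].
        x_ph = TensorList(create_var_list('first/x', x, None))

        # shape=[None]: a variable batch dimension is prepended to the
        # per-sample shape v[0].shape, so samples_0 has shape [None, 16, 4, 4].
        samples_ph = TensorList(create_var_list('samples', x, [None]))

Since create_var_list builds names as "{}_{}".format(scope, idx), a scope of 'first/x' reproduces exactly the '{}x_{}'.format(scope, idx) names the deleted list comprehensions used, so feed dictionaries written against the old placeholders keep working.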
@@ -5,6 +5,7 @@ from paddle import fluid
 from pytracking.libs import optimization, TensorList, operation
 from pytracking.libs.paddle_utils import PTensor, broadcast_op, n2p, static_identity
 import math
+from pytracking.libs.paddle_utils import create_var_list
def stack_input(e):
@@ -50,29 +51,18 @@ class FactorizedConvProblem(optimization.L2Problem):
     def get_inputs(self, scope=''):
         if scope not in self.inputs_dict:
-            training_samples_p = TensorList([
-                fluid.layers.data(
-                    '{}training_samples_{}'.format(scope, idx),
-                    shape=[None] + list(v[0].shape),
-                    stop_gradient=False,
-                    append_batch_size=False)
-                for idx, v in enumerate(self.training_samples)
-            ])
-            y_p = TensorList([
-                fluid.layers.data(
-                    '{}y_{}'.format(scope, idx),
-                    shape=[None] + list(v[0].shape),
-                    stop_gradient=False,
-                    append_batch_size=False) for idx, v in enumerate(self.y)
-            ])
-            sample_weights_p = TensorList([
-                fluid.layers.data(
-                    '{}sample_weights_{}'.format(scope, idx),
-                    shape=[None, 1],
-                    stop_gradient=False,
-                    append_batch_size=False)
-                for idx, v in enumerate(self.sample_weights)
-            ])
+            name = scope + "training_samples"
+            vars = create_var_list(name, self.training_samples, [None])
+            training_samples_p = TensorList(vars)
+
+            name = scope + "y"
+            vars = create_var_list(name, self.y, [None])
+            y_p = TensorList(vars)
+
+            name = scope + "sample_weights"
+            vars = create_var_list(name, self.sample_weights, [None, 1])
+            sample_weights_p = TensorList(vars)
             self.inputs_dict[scope] = (training_samples_p, y_p,
                                        sample_weights_p)
@@ -189,29 +179,18 @@ class ConvProblem(optimization.L2Problem):
     def get_inputs(self, scope=''):
         if scope not in self.inputs_dict:
-            training_samples_p = TensorList([
-                fluid.layers.data(
-                    '{}training_samples_{}'.format(scope, idx),
-                    shape=[None] + list(v[0].shape),
-                    stop_gradient=False,
-                    append_batch_size=False)
-                for idx, v in enumerate(self.training_samples)
-            ])
-            y_p = TensorList([
-                fluid.layers.data(
-                    '{}y_{}'.format(scope, idx),
-                    shape=[None] + list(v[0].shape),
-                    stop_gradient=False,
-                    append_batch_size=False) for idx, v in enumerate(self.y)
-            ])
-            sample_weights_p = TensorList([
-                fluid.layers.data(
-                    '{}sample_weights_{}'.format(scope, idx),
-                    shape=[None] + list(v[0].shape),
-                    stop_gradient=False,
-                    append_batch_size=False)
-                for idx, v in enumerate(self.sample_weights)
-            ])
+            name = scope + "training_samples"
+            vars = create_var_list(name, self.training_samples, [None])
+            training_samples_p = TensorList(vars)
+
+            name = scope + "y"
+            vars = create_var_list(name, self.y, [None])
+            y_p = TensorList(vars)
+
+            name = scope + "sample_weights"
+            vars = create_var_list(name, self.sample_weights, [None])
+            sample_weights_p = TensorList(vars)
             self.inputs_dict[scope] = (training_samples_p, y_p,
                                        sample_weights_p)
......
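The same name compatibility holds for the get_inputs refactors above: scope + "training_samples" run through the helper's "{}_{}" pattern reproduces the old '{}training_samples_{}'.format(scope, idx) names. A self-contained check, with an illustrative array:

    import numpy as np

    scope = 'first/'
    training_samples = [np.zeros([5, 32, 16, 16], 'float32')]  # illustrative

    old_names = ['{}training_samples_{}'.format(scope, idx)
                 for idx, _ in enumerate(training_samples)]
    name = scope + "training_samples"
    new_names = ['{}_{}'.format(name, idx)
                 for idx, _ in enumerate(training_samples)]
    assert old_names == new_names == ['first/training_samples_0']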