Commit 7c0facd1 authored by Q qijun

init

Parent 17b4cea4
......@@ -18,8 +18,10 @@ limitations under the License. */
#include <iostream>
#include <string>
#include <unordered_map>
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/variable.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/macros.h"
#include "paddle/platform/place.h"
namespace paddle {
namespace framework {
......@@ -75,5 +77,65 @@ class Scope {
framework::Scope& GetGlobalScope();
// template <typename T>
// void SetFeedVariable(const std::vector<T>& input, const LoD& lod,
// const std::vector<int64_t>& dims,
// const std::string& var_name, size_t index) {
// Variable* g_feed_value = GetGlobalScope().Var(var_name);
// // feed variable holds vector<LodTensor>
// auto& feed_inputs =
// *(g_feed_value->GetMutable<
// std::vector<paddle::framework::LoDTensor>>());
// if (index >= feed_inputs.size()) {
// feed_inputs.resize(index + 1);
// }
// // copy tensor
// T* dst = feed_inputs[index].mutable_data<T>(make_ddim(dims),
// platform::CPUPlace());
// memcpy(dst, input.data(), input.size() * sizeof(T));
// // copy lod
// feed_inputs[index].set_lod(lod);
// }
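// Stores `input` in slot `index` of the global feed variable `var_name`.
// The variable holds a std::vector<LoDTensor>; the vector is grown if
// needed, the tensor data is shared (not copied), and the LoD is carried over.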
template <typename T>
void SetFeedVariable(const LoDTensor& input, const std::string& var_name,
size_t index) {
std::cout << "into SetFeedVariable" << std::endl;
std::cout << var_name << std::endl;
std::cout << index << std::endl;
Variable* g_feed_value = GetGlobalScope().Var(var_name);
auto& feed_inputs =
*(g_feed_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
if (index >= feed_inputs.size()) {
feed_inputs.resize(index + 1);
}
// shared data with input tensor
feed_inputs[index].ShareDataWith<T>(input);
// set lod
feed_inputs[index].set_lod(input.lod());
}
// template <typename T>
// std::vector<T> GetFetchVariable(const std::string& var_name, size_t index) {
// Variable* g_fetch_value = GetGlobalScope().Var(var_name);
// auto& fetch_outputs =
// *(g_fetch_value->GetMutable<
// std::vector<paddle::framework::LoDTensor>>());
// std::vector<T> result;
// result.resize(fetch_outputs[index].numel());
// memcpy(result.data(), fetch_outputs[index].data<T>(),
// fetch_outputs[index].numel() * sizeof(T));
// return result;
// }
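// Returns a reference to slot `index` of the global variable `var_name`,
// which holds a std::vector<LoDTensor>; enforces that `index` is in range.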
template <typename T>
LoDTensor& GetFetchVariable(const std::string& var_name, size_t index) {
Variable* g_fetch_value = GetGlobalScope().Var(var_name);
auto& fetch_outputs =
*(g_fetch_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
std::cout << "into GetFetchVariable" << std::endl;
PADDLE_ENFORCE_LT(index, fetch_outputs.size());
return fetch_outputs[index];
}
} // namespace framework
} // namespace paddle
......@@ -394,6 +394,12 @@ All parameter, weight, gradient are variables in Paddle.
m.def("unique_integer", UniqueIntegerGenerator);
m.def("is_compile_gpu", IsCompileGPU);
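// Feed/fetch are exposed to Python only for float tensors for now; the
// double and int instantiations below are left commented out.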
m.def("set_feed_variable", SetFeedVariable<float>);
// m.def("set_feed_variable", SetFeedVariable<double>);
// m.def("set_feed_variable", SetFeedVariable<int>);
m.def("get_fetch_variable", GetFetchVariable<float>);
// m.def("get_fetch_variable", GetFetchVariable<double>);
// m.def("get_fetch_variable", GetFetchVariable<int>);
BindProgramDesc(m);
BindBlockDesc(m);
......
import paddle.v2.framework.core as core
import unittest
import numpy as np
# class TestFeedFetch(unittest.TestCase):
# def test_feed_fetch(self):
# place = core.CPUPlace()
# input_tensor = core.LoDTensor([[0, 2, 4]])
# input_tensor.set_dims([4, 4, 6])
# input_tensor.alloc_int(place)
# input_array = np.array(input_tensor)
# input_array[0, 0, 0] = 3
# input_array[3, 3, 5] = 10
# input_tensor.set(input_array, place)
# core.set_feed_variable(input_tensor, "feed", 0)
# output_tensor = core.get_fetch_variable("feed", 0)
# print type(output_tensor)
# output_lod = output_tensor.lod()
# print type(output_lod)
# print output_lod[0]
# print output_lod[0][0]
# print output_lod[0][1]
# print output_lod[0][2]
# # self.assertEqual(0, output_lod[0][0])
# # self.assertEqual(0, output_lod[0][0])
# # self.assertEqual(2, output_lod[0][1])
# # self.assertEqual(4, output_lod[0][2])
# # output_array = np.array(output_tensor)
# # self.assertEqual(3, output_array[0, 0, 0])
# # self.assertEqual(10, output_array[3, 3, 5]);
class TestFeedFetch(unittest.TestCase):
    def test_feed_fetch(self):
        place = core.CPUPlace()
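        # LoD [[0, 2, 4]] is one level of offsets: two sequences covering
        # rows [0, 2) and [2, 4) of the 4 x 4 x 6 tensor below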
        input_tensor = core.LoDTensor([[0, 2, 4]])
        input_tensor.set_dims([4, 4, 6])
        input_tensor.alloc_float(place)
        input_array = np.array(input_tensor)
        input_array[0, 0, 0] = 3
        input_array[3, 3, 5] = 10
        input_tensor.set(input_array, place)
        core.set_feed_variable(input_tensor, "feed", 0)
        output_tensor = core.get_fetch_variable("feed", 0)
        output_lod = output_tensor.lod()
        self.assertEqual(0, output_lod[0][0])
        self.assertEqual(2, output_lod[0][1])
        self.assertEqual(4, output_lod[0][2])
        output_array = np.array(output_tensor)
        self.assertEqual(3, output_array[0, 0, 0])
        self.assertEqual(10, output_array[3, 3, 5])


if __name__ == "__main__":
    unittest.main()