提交 388e699b 编写于 作者: qnqinan's avatar qnqinan

Merge branch 'develop' of https://github.com/PaddlePaddle/paddle-mobile into develop

...@@ -197,5 +197,17 @@ uint64_t vaddr_to_paddr(void *address) { ...@@ -197,5 +197,17 @@ uint64_t vaddr_to_paddr(void *address) {
return 0; return 0;
#endif #endif
} }
// Returns the paddle-mobile FPGA build version packed into one 32-bit word:
//   bits 31-24: major (1)
//   bits 23-16: minor (2)
//   bits 15-8 : build number (34)
//   bits 7-0  : target id (1 = master, 2 = slave)
// This build reports the *slave* target, i.e. 0x01022202.
uint32_t paddle_mobile_version() {
  const uint32_t major = 1;
  const uint32_t minor = 2;
  const uint32_t build = 34;
  const uint32_t target_slave = 2;  // NOTE(review): master id (1) was computed
                                    // and discarded before; dead code removed.
  return major << 24 | minor << 16 | build << 8 | target_slave;
}
} // namespace fpga } // namespace fpga
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -30,6 +30,7 @@ limitations under the License. */ ...@@ -30,6 +30,7 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace fpga { namespace fpga {
enum DataType { enum DataType {
DATA_TYPE_INT8 = 2, DATA_TYPE_INT8 = 2,
DATA_TYPE_FP32 = 1, DATA_TYPE_FP32 = 1,
...@@ -275,5 +276,8 @@ uint64_t vaddr_to_paddr(void* address); ...@@ -275,5 +276,8 @@ uint64_t vaddr_to_paddr(void* address);
void expand_conv_arg(ConvArgs* arg); void expand_conv_arg(ConvArgs* arg);
void expand_EW_arg(EWAddArgs* arg); void expand_EW_arg(EWAddArgs* arg);
inline int32_t convertmantissa(int32_t i); inline int32_t convertmantissa(int32_t i);
uint32_t paddle_mobile_version();
} // namespace fpga } // namespace fpga
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -30,10 +30,12 @@ limitations under the License. */ ...@@ -30,10 +30,12 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
#ifdef PADDLE_MOBILE_FPGA #ifdef PADDLE_MOBILE_FPGA
namespace fpga { namespace fpga {
int open_device(); int open_device();
void* fpga_malloc(size_t size); void* fpga_malloc(size_t size);
void fpga_free(void* ptr); void fpga_free(void* ptr);
uint32_t paddle_mobile_version();
} // namespace fpga } // namespace fpga
#endif #endif
......
...@@ -139,6 +139,9 @@ PaddleMobileConfig GetConfig1() { ...@@ -139,6 +139,9 @@ PaddleMobileConfig GetConfig1() {
int main() { int main() {
open_device(); open_device();
timeval start11, end11;
long dif_sec, dif_usec; // NOLINT
PaddleMobileConfig config = GetConfig(); PaddleMobileConfig config = GetConfig();
auto predictor = auto predictor =
CreatePaddlePredictor<PaddleMobileConfig, CreatePaddlePredictor<PaddleMobileConfig,
...@@ -172,8 +175,6 @@ int main() { ...@@ -172,8 +175,6 @@ int main() {
std::cout << "Finishing feeding data " << std::endl; std::cout << "Finishing feeding data " << std::endl;
timeval start11, end11;
long dif_sec, dif_usec; // NOLINT
gettimeofday(&start11, NULL); gettimeofday(&start11, NULL);
predictor->Predict_From_To(0, -1); predictor->Predict_From_To(0, -1);
gettimeofday(&end11, NULL); gettimeofday(&end11, NULL);
...@@ -189,8 +190,9 @@ int main() { ...@@ -189,8 +190,9 @@ int main() {
std::cout << "Output number is " << v.size() << std::endl; std::cout << "Output number is " << v.size() << std::endl;
for (int fetchNum = 0; fetchNum < v.size(); fetchNum++) { for (int fetchNum = 0; fetchNum < v.size(); fetchNum++) {
std::string dumpName = "marker_api_fetch_" + std::to_string(fetchNum); std::string dumpName = "marker_api_fetch_" + std::to_string(fetchNum);
dump_stride(dumpName, v[fetchNum]); // dump_stride(dumpName, v[fetchNum]);
} }
fpga_free(img);
PaddleMobileConfig config1 = GetConfig1(); PaddleMobileConfig config1 = GetConfig1();
auto predictor1 = auto predictor1 =
...@@ -233,6 +235,7 @@ int main() { ...@@ -233,6 +235,7 @@ int main() {
std::string dumpName = "marker2_api_fetch_" + std::to_string(fetchNum); std::string dumpName = "marker2_api_fetch_" + std::to_string(fetchNum);
dump_stride(dumpName, v1[fetchNum]); dump_stride(dumpName, v1[fetchNum]);
} }
fpga_free(img1);
} }
return 0; return 0;
} }
...@@ -16,6 +16,7 @@ limitations under the License. */ ...@@ -16,6 +16,7 @@ limitations under the License. */
#define PADDLE_MOBILE_FPGA #define PADDLE_MOBILE_FPGA
#endif #endif
#include <fstream> #include <fstream>
#include <iomanip>
#include <iostream> #include <iostream>
#include "../../src/io/paddle_inference_api.h" #include "../../src/io/paddle_inference_api.h"
...@@ -69,7 +70,7 @@ PaddleMobileConfig GetConfig1() { ...@@ -69,7 +70,7 @@ PaddleMobileConfig GetConfig1() {
int main() { int main() {
open_device(); open_device();
#if 0
PaddleMobileConfig config1 = GetConfig1(); PaddleMobileConfig config1 = GetConfig1();
auto predictor1 = auto predictor1 =
CreatePaddlePredictor<PaddleMobileConfig, CreatePaddlePredictor<PaddleMobileConfig,
...@@ -98,7 +99,8 @@ int main() { ...@@ -98,7 +99,8 @@ int main() {
predictor1->FetchPaddleTensors(&v1); // Old data in v will be cleared predictor1->FetchPaddleTensors(&v1); // Old data in v will be cleared
std::cout << "Output number is " << v1.size() << std::endl; std::cout << "Output number is " << v1.size() << std::endl;
std::cout << "out[0] length " << v1[0].data.length() << std::endl; std::cout << "out[0] length " << v1[0].data.length() << std::endl;
fpga_free(img1);
#endif
//////////////////////////// ////////////////////////////
PaddleMobileConfig config = GetConfig(); PaddleMobileConfig config = GetConfig();
...@@ -160,6 +162,11 @@ int main() { ...@@ -160,6 +162,11 @@ int main() {
} }
} }
std::cout << "Finish getting vector values" << std::endl; std::cout << "Finish getting vector values" << std::endl;
fpga_free(img);
auto version = fpga::paddle_mobile_version();
std::cout << "0X0" << std::hex << version << std::endl;
return 0; return 0;
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册