
Commit

Merge branch 'develop' into support_custom_lite_dev
DefTruth committed May 8, 2023
2 parents 1372686 + 77cb9db commit 0f798a9
Showing 31 changed files with 1,835 additions and 51 deletions.
6 changes: 6 additions & 0 deletions benchmark/cpp/CMakeLists.txt
@@ -40,6 +40,8 @@ add_executable(benchmark_ttfnet ${PROJECT_SOURCE_DIR}/benchmark_ttfnet.cc)
add_executable(benchmark ${PROJECT_SOURCE_DIR}/benchmark.cc)
add_executable(benchmark_ppdet ${PROJECT_SOURCE_DIR}/benchmark_ppdet.cc)
add_executable(benchmark_dino ${PROJECT_SOURCE_DIR}/benchmark_dino.cc)
add_executable(benchmark_ppshituv2_rec ${PROJECT_SOURCE_DIR}/benchmark_ppshituv2_rec.cc)
add_executable(benchmark_ppshituv2_det ${PROJECT_SOURCE_DIR}/benchmark_ppshituv2_det.cc)

if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags pthread)
@@ -75,6 +77,8 @@ if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(benchmark ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppdet ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_dino ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppshituv2_rec ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppshituv2_det ${FASTDEPLOY_LIBS} gflags pthread)
else()
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppyolov5 ${FASTDEPLOY_LIBS} gflags)
@@ -109,6 +113,8 @@ else()
target_link_libraries(benchmark ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppdet ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_dino ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppshituv2_rec ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppshituv2_det ${FASTDEPLOY_LIBS} gflags)
endif()
# only for Android ADB test
if(ANDROID)
32 changes: 20 additions & 12 deletions benchmark/cpp/benchmark.cc
100755 → 100644
@@ -25,20 +25,23 @@ DEFINE_string(dtypes, "FP32", "Set input dtypes for model.");
DEFINE_string(trt_shapes, "1,3,224,224:1,3,224,224:1,3,224,224",
"Set min/opt/max shape for trt/paddle_trt backend."
"eg:--trt_shape 1,3,224,224:1,3,224,224:1,3,224,224");
DEFINE_int32(batch, 1, "trt max batch size, default=1");
DEFINE_bool(dump, false, "whether to dump output tensors.");
DEFINE_bool(info, false, "only check the input infos of model");
DEFINE_bool(diff, false, "check the diff between two tensors.");
DEFINE_string(tensors, "tensor_a.txt:tensor_b.txt",
"The paths to dumped tensors.");
DEFINE_bool(mem, false, "Whether to force to collect memory info.");
DEFINE_int32(interval, -1, "Sampling interval for collect memory info.");
DEFINE_string(model_file, "UNKNOWN", "Optional, set specific model file,"
"eg, model.pdmodel, model.onnx");
DEFINE_string(params_file, "", "Optional, set specific params file,"
"eg, model.pdiparams.");
DEFINE_string(model_format, "PADDLE", "Optional, set specific model format,"
"eg, PADDLE/ONNX/RKNN/TORCHSCRIPT/SOPHGO");
DEFINE_string(model_file, "UNKNOWN",
"Optional, set specific model file,"
"eg, model.pdmodel, model.onnx");
DEFINE_string(params_file, "",
"Optional, set specific params file,"
"eg, model.pdiparams.");
DEFINE_string(model_format, "PADDLE",
"Optional, set specific model format,"
"eg, PADDLE/ONNX/RKNN/TORCHSCRIPT/SOPHGO");
DEFINE_bool(disable_mkldnn, false, "disable mkldnn for paddle backend");

#if defined(ENABLE_BENCHMARK)
@@ -117,9 +120,9 @@ static void RuntimeProfiling(int argc, char* argv[]) {
model_file = FLAGS_model + sep + FLAGS_model_file;
params_file = FLAGS_model + sep + FLAGS_params_file;
model_format = GetModelFormat(FLAGS_model_format);
if (model_format == fastdeploy::ModelFormat::PADDLE &&
FLAGS_params_file == "") {
std::cout << "[ERROR] params_file can not be empty for PADDLE"
<< " format, Please, set your custom params_file manually."
<< std::endl;
return;
@@ -134,7 +137,7 @@ static void RuntimeProfiling(int argc, char* argv[]) {
model_file = FLAGS_model + sep + model_name;
params_file = FLAGS_model + sep + params_name;
}

option.SetModelPath(model_file, params_file, model_format);

// Get input shapes/names/dtypes
@@ -256,6 +259,9 @@ static void showInputInfos(int argc, char* argv[]) {
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return;
}
if (FLAGS_disable_mkldnn) {
option.paddle_infer_option.enable_mkldnn = false;
}
std::unordered_map<std::string, std::string> config_info;
benchmark::ResultManager::LoadBenchmarkConfig(FLAGS_config_path,
&config_info);
@@ -286,7 +292,9 @@ static void showInputInfos(int argc, char* argv[]) {
int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK)
google::SetVersionString("0.0.0");
google::SetUsageMessage("./benchmark -[info|diff|check|dump|mem] -model xxx -config_path xxx -[shapes|dtypes|names|tensors] -[model_file|params_file|model_format]");
google::SetUsageMessage(
"./benchmark -[info|diff|check|dump|mem] -model xxx -config_path xxx "
"-[shapes|dtypes|names|tensors] -[model_file|params_file|model_format]");
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_diff) {
CheckTensorDiff(argc, argv);
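For orientation, here is a minimal sketch, not part of this commit, of how the reworked -model_file/-params_file/-model_format flags and the new -disable_mkldnn flag above are meant to combine into a RuntimeOption. The model directory and file names are placeholder assumptions.

// Hedged sketch, not part of this commit: wiring the benchmark's custom-model
// flags into a RuntimeOption by hand. The model directory and file names below
// are placeholders.
#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  // What -model_file/-params_file/-model_format resolve to in RuntimeProfiling().
  option.SetModelPath("my_model/model.pdmodel", "my_model/model.pdiparams",
                      fastdeploy::ModelFormat::PADDLE);
  // What the new -disable_mkldnn flag toggles for the Paddle Inference backend.
  option.paddle_infer_option.enable_mkldnn = false;
  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) return -1;
  return 0;
}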
7 changes: 3 additions & 4 deletions benchmark/cpp/benchmark_picodet.cc
100755 → 100644
@@ -49,8 +49,7 @@ int main(int argc, char* argv[]) {
config_info["backend"] == "trt") {
option.trt_option.SetShape("image", {1, 3, 640, 640}, {1, 3, 640, 640},
{1, 3, 640, 640});
option.trt_option.SetShape("scale_factor", {1, 2}, {1, 2},
{1, 2});
option.trt_option.SetShape("scale_factor", {1, 2}, {1, 2}, {1, 2});
}
auto model_picodet = vision::detection::PicoDet(
model_file, params_file, config_file, option, model_format);
@@ -81,9 +80,9 @@ int main(int argc, char* argv[]) {
}
// Run profiling
BENCHMARK_MODEL(model_picodet, model_picodet.Predict(im, &res))
auto vis_im = vision::VisDetection(im, res, 0.5f);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
#endif
return 0;
}
89 changes: 89 additions & 0 deletions benchmark/cpp/benchmark_ppshituv2_det.cc
@@ -0,0 +1,89 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "flags.h"
#include "macros.h"
#include "option.h"

namespace vision = fastdeploy::vision;
namespace benchmark = fastdeploy::benchmark;

DEFINE_bool(no_nms, false, "Whether the model contains nms.");

int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK) && defined(ENABLE_VISION)
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
std::unordered_map<std::string, std::string> config_info;
benchmark::ResultManager::LoadBenchmarkConfig(FLAGS_config_path,
&config_info);
std::string model_name, params_name, config_name;
auto model_format = fastdeploy::ModelFormat::PADDLE;
if (!UpdateModelResourceName(&model_name, &params_name, &config_name,
&model_format, config_info)) {
return -1;
}

auto model_file = FLAGS_model + sep + model_name;
auto params_file = FLAGS_model + sep + params_name;
auto config_file = FLAGS_model + sep + config_name;
if (config_info["backend"] == "paddle_trt") {
option.paddle_infer_option.collect_trt_shape = true;
}
if (config_info["backend"] == "paddle_trt" ||
config_info["backend"] == "trt") {
option.trt_option.SetShape("image", {1, 3, 640, 640}, {1, 3, 640, 640},
{1, 3, 640, 640});
option.trt_option.SetShape("scale_factor", {1, 2}, {1, 2}, {1, 2});
option.trt_option.SetShape("im_shape", {1, 2}, {1, 2}, {1, 2});
}
auto model = vision::classification::PPShiTuV2Detector(
model_file, params_file, config_file, option, model_format);
if (FLAGS_no_nms) {
model.GetPostprocessor().ApplyNMS();
}
vision::DetectionResult res;
if (config_info["precision_compare"] == "true") {
// Run once at least
model.Predict(im, &res);
// 1. Test result diff
std::cout << "=============== Test result diff =================\n";
// Save result to -> disk.
std::string det_result_path = "ppshituv2_det_result.txt";
benchmark::ResultManager::SaveDetectionResult(res, det_result_path);
// Load result from <- disk.
vision::DetectionResult res_loaded;
benchmark::ResultManager::LoadDetectionResult(&res_loaded, det_result_path);
// Calculate diff between two results.
auto det_diff =
benchmark::ResultManager::CalculateDiffStatis(res, res_loaded);
std::cout << "Boxes diff: mean=" << det_diff.boxes.mean
<< ", max=" << det_diff.boxes.max
<< ", min=" << det_diff.boxes.min << std::endl;
std::cout << "Label_ids diff: mean=" << det_diff.labels.mean
<< ", max=" << det_diff.labels.max
<< ", min=" << det_diff.labels.min << std::endl;
}
// Run profiling
BENCHMARK_MODEL(model, model.Predict(im, &res))
auto vis_im = vision::VisDetection(im, res, 0.5f);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
#endif
return 0;
}
93 changes: 93 additions & 0 deletions benchmark/cpp/benchmark_ppshituv2_rec.cc
@@ -0,0 +1,93 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "flags.h"
#include "macros.h"
#include "option.h"

namespace vision = fastdeploy::vision;
namespace benchmark = fastdeploy::benchmark;

DEFINE_string(trt_shape, "1,3,224,224:1,3,224,224:1,3,224,224",
"Set min/opt/max shape for trt/paddle_trt backend."
"eg:--trt_shape 1,3,224,224:1,3,224,224:1,3,224,224");

DEFINE_string(input_name, "x",
"Set input name for trt/paddle_trt backend."
"eg:--input_names x");

int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK) && defined(ENABLE_VISION)
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
std::unordered_map<std::string, std::string> config_info;
benchmark::ResultManager::LoadBenchmarkConfig(FLAGS_config_path,
&config_info);
// Set max_batch_size 1 for best performance
if (config_info["backend"] == "paddle_trt") {
option.trt_option.max_batch_size = 1;
}
std::string model_name, params_name, config_name;
auto model_format = fastdeploy::ModelFormat::PADDLE;
if (!UpdateModelResourceName(&model_name, &params_name, &config_name,
&model_format, config_info)) {
return -1;
}

auto model_file = FLAGS_model + sep + model_name;
auto params_file = FLAGS_model + sep + params_name;
auto config_file = FLAGS_model + sep + config_name;
if (config_info["backend"] == "paddle_trt") {
option.paddle_infer_option.collect_trt_shape = true;
}
if (config_info["backend"] == "paddle_trt" ||
config_info["backend"] == "trt") {
std::vector<std::vector<int32_t>> trt_shapes =
benchmark::ResultManager::GetInputShapes(FLAGS_trt_shape);
option.trt_option.SetShape(FLAGS_input_name, trt_shapes[0], trt_shapes[1],
trt_shapes[2]);
}

auto model = vision::classification::PPShiTuV2Recognizer(
model_file, params_file, config_file, option, model_format);
vision::ClassifyResult res;
if (config_info["precision_compare"] == "true") {
// Run once at least
model.Predict(im, &res);
// 1. Test result diff
std::cout << "=============== Test result diff =================\n";
// Save result to -> disk.
std::string cls_result_path = "ppcls_result.txt";
benchmark::ResultManager::SaveClassifyResult(res, cls_result_path);
// Load result from <- disk.
vision::ClassifyResult res_loaded;
benchmark::ResultManager::LoadClassifyResult(&res_loaded, cls_result_path);
// Calculate diff between two results.
auto cls_diff =
benchmark::ResultManager::CalculateDiffStatis(res, res_loaded);
std::cout << "Labels diff: mean=" << cls_diff.labels.mean
<< ", max=" << cls_diff.labels.max
<< ", min=" << cls_diff.labels.min << std::endl;
std::cout << "Scores diff: mean=" << cls_diff.scores.mean
<< ", max=" << cls_diff.scores.max
<< ", min=" << cls_diff.scores.min << std::endl;
}
BENCHMARK_MODEL(model, model.Predict(im, &res))
#endif
return 0;
}
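To place the two new benchmarks in context, a minimal sketch, not from this commit, of using PPShiTuV2Detector and PPShiTuV2Recognizer together. The model, params, and config paths are placeholder assumptions, and a real PP-ShiTuV2 pipeline would run the recognizer on crops taken from the detector's boxes rather than on the full image.

// Hedged sketch, not part of this commit: running the PP-ShiTuV2 mainbody
// detector and feature recognizer that the two benchmarks above exercise.
// All file paths are placeholders.
#include <opencv2/opencv.hpp>
#include "fastdeploy/vision.h"

int main() {
  namespace vision = fastdeploy::vision;
  auto option = fastdeploy::RuntimeOption();  // default CPU inference

  auto det = vision::classification::PPShiTuV2Detector(
      "det/inference.pdmodel", "det/inference.pdiparams", "det/infer_cfg.yml",
      option, fastdeploy::ModelFormat::PADDLE);
  auto rec = vision::classification::PPShiTuV2Recognizer(
      "rec/inference.pdmodel", "rec/inference.pdiparams", "rec/inference_cls.yaml",
      option, fastdeploy::ModelFormat::PADDLE);

  auto im = cv::imread("test.jpg");

  vision::DetectionResult det_res;
  det.Predict(im, &det_res);

  // A full PP-ShiTuV2 pipeline would crop det_res boxes first; for brevity this
  // sketch recognizes the whole image.
  vision::ClassifyResult rec_res;
  rec.Predict(im, &rec_res);

  auto vis_im = vision::VisDetection(im, det_res, 0.5f);
  cv::imwrite("vis_result.jpg", vis_im);
  return 0;
}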