From c948b72e6c5285dbd50b2a90306ecfdf1eda9fbf Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 18:08:02 +0800 Subject: [PATCH 1/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0PPHumanSeg=20Example?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/cn/faq/rknpu2/rknpu2.md | 37 +++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/docs/cn/faq/rknpu2/rknpu2.md b/docs/cn/faq/rknpu2/rknpu2.md index 99554e5bad..1426e0cd3d 100644 --- a/docs/cn/faq/rknpu2/rknpu2.md +++ b/docs/cn/faq/rknpu2/rknpu2.md @@ -8,25 +8,28 @@ RKNPU2模型导出只支持在x86Linux平台上进行导出,安装流程请参 ONNX模型不能直接调用RK芯片中的NPU进行运算,需要把ONNX模型转换为RKNN模型,具体流程请查看[RKNPU2转换文档](./export.md) ## RKNPU2已经支持的模型列表 -以下环境测试的速度均为端到端,测试环境如下: -* 设备型号: RK3588 -* ARM CPU使用ONNX框架进行测试 + +FastDeploy在RK3588s上进行了测试,测试环境如下: + +* 设备型号: RK3588-s * NPU均使用单核进行测试 -| 任务场景 | 模型 | 模型版本(表示已经测试的版本) | ARM CPU/RKNN速度(ms) | -|----------------------|--------------------------------------------------------------------------------------------------|--------------------------|--------------------| -| Detection | [Picodet](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | Picodet-s | 162/112 | -| Detection | [PaddleDetection Yolov8](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | yolov8-n | -/100 | -| Detection | [PPYOLOE](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | ppyoloe-s(int8) | -/77 | -| Detection | [RKYOLOV5](../../../../examples/vision/detection/rkyolo/README.md) | YOLOV5-S-Relu(int8) | -/57 | -| Detection | [RKYOLOX](../../../../examples/vision/detection/rkyolo/README.md) | - | -/- | -| Detection | [RKYOLOV7](../../../../examples/vision/detection/rkyolo/README.md) | - | -/- | -| Segmentation | [Unet](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | Unet-cityscapes | -/- | -| Segmentation | 
[PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | portrait(int8) | 133/43 | -| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | human(int8) | 133/43 | -| Face Detection | [SCRFD](../../../../examples/vision/facedet/scrfd/rknpu2/README.md) | SCRFD-2.5G-kps-640(int8) | 108/42 | -| Face FaceRecognition | [InsightFace](../../../../examples/vision/faceid/insightface/rknpu2/README_CN.md) | ms1mv3_arcface_r18(int8) | 81/12 | -| Classification | [ResNet](../../../../examples/vision/classification/paddleclas/rknpu2/README.md) | ResNet50_vd | -/33 | +以下环境测试的速度均为端到端测试速度根据芯片体质的不同,速度会上下有所浮动,仅供参考。 + +| 任务场景 | 模型及其example | 模型版本 | 是否量化 | RKNN速度(ms) | +|----------------------|--------------------------------------------------------------------------------------------------|--------------------------|------|------------| +| Classification | [ResNet](../../../../examples/vision/classification/paddleclas/rknpu2/README.md) | ResNet50_vd | 否 | 33 | +| Detection | [Picodet](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | Picodet-s | 否 | 112 | +| Detection | [PaddleDetection Yolov8](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | yolov8-n | 否 | 100 | +| Detection | [PPYOLOE](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | ppyoloe-s(int8) | 是 | 141 | +| Detection | [RKYOLOV5](../../../../examples/vision/detection/rkyolo/README.md) | YOLOV5-S-Relu(int8) | 是 | 57 | +| Detection | [RKYOLOX](../../../../examples/vision/detection/rkyolo/README.md) | yolox-s | 是 | 130 | +| Detection | [RKYOLOV7](../../../../examples/vision/detection/rkyolo/README.md) | yolov7-tiny | 是 | 58 | +| Segmentation | [Unet](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | Unet-cityscapes | 否 | - | +| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | portrait(int8) | 是 | 
43 | +| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | human(int8) | 是 | 43 | +| Face Detection | [SCRFD](../../../../examples/vision/facedet/scrfd/rknpu2/README.md) | SCRFD-2.5G-kps-640(int8) | 是 | 42 | +| Face FaceRecognition | [InsightFace](../../../../examples/vision/faceid/insightface/rknpu2/README_CN.md) | ms1mv3_arcface_r18(int8) | 是 | 12 | ## 预编译库下载 From 4ccfbead16524411c50d9bae999348e415e4003d Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 19:25:44 +0800 Subject: [PATCH 2/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0Preprocess=E4=BB=A3?= =?UTF-8?q?=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../keypointdet/pptinypose/pptinypose.cc | 19 ++++++++++++------- .../keypointdet/pptinypose/pptinypose.h | 18 +++++++++++++++++- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc index a6de59c9e1..d3668f8a5e 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc @@ -21,6 +21,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file, Backend::LITE}; valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; valid_kunlunxin_backends = {Backend::LITE}; + valid_rknpu_backends = {Backend::RKNPU2}; runtime_option = custom_option; runtime_option.model_format = model_format; runtime_option.model_file = model_file; @@ -66,14 +67,18 @@ bool PPTinyPose::BuildPreprocessPipelineFromConfig() { for (const auto& op : cfg["Preprocess"]) { std::string op_name = op["type"].as(); if (op_name == "NormalizeImage") { - auto mean = op["mean"].as>(); - auto std = op["std"].as>(); - bool is_scale = op["is_scale"].as(); - processors_.push_back(std::make_shared(mean, std, is_scale)); + if (!disable_normalize_) { + auto mean = op["mean"].as>(); + auto std = 
op["std"].as>(); + bool is_scale = op["is_scale"].as(); + processors_.push_back(std::make_shared(mean, std, is_scale)); + } } else if (op_name == "Permute") { - // permute = cast + HWC2CHW - processors_.push_back(std::make_shared("float")); - processors_.push_back(std::make_shared()); + if (!disable_permute_) { + // permute = cast + HWC2CHW + processors_.push_back(std::make_shared("float")); + processors_.push_back(std::make_shared()); + } } else if (op_name == "TopDownEvalAffine") { auto trainsize = op["trainsize"].as>(); int height = trainsize[1]; diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.h b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.h index bfa8a8e604..df114b2bd5 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.h +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.h @@ -30,7 +30,7 @@ namespace keypointdetection { */ class FASTDEPLOY_DECL PPTinyPose : public FastDeployModel { public: - /** \brief Set path of model file and configuration file, and the configuration of runtime + /** \brief Set path of model file and configuration file, and the configuration of runtime * * \param[in] model_file Path of model file, e.g pptinypose/model.pdmodel * \param[in] params_file Path of parameter file, e.g pptinypose/model.pdiparams, if the model format is ONNX, this parameter will be ignored @@ -68,6 +68,18 @@ class FASTDEPLOY_DECL PPTinyPose : public FastDeployModel { */ bool use_dark = true; + /// This function will disable normalize in preprocessing step. + void DisableNormalize() { + disable_normalize_ = true; + BuildPreprocessPipelineFromConfig(); + } + + /// This function will disable hwc2chw in preprocessing step. 
+ void DisablePermute() { + disable_permute_ = true; + BuildPreprocessPipelineFromConfig(); + } + protected: bool Initialize(); /// Build the preprocess pipeline from the loaded model @@ -84,6 +96,10 @@ class FASTDEPLOY_DECL PPTinyPose : public FastDeployModel { private: std::vector> processors_; std::string config_file_; + // for recording the switch of hwc2chw + bool disable_permute_ = false; + // for recording the switch of normalize + bool disable_normalize_ = false; }; } // namespace keypointdetection } // namespace vision From f900199c02b088ae4005588064b72d81ae8af14b Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 19:53:48 +0800 Subject: [PATCH 3/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0example=20=E5=92=8C?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E8=BD=AC=E6=8D=A2=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tiny_pose/rknpu2/README.md | 55 ++++++++++++ .../tiny_pose/rknpu2/cpp/CMakeLists.txt | 13 +++ .../tiny_pose/rknpu2/cpp/README.md | 85 +++++++++++++++++++ .../tiny_pose/rknpu2/cpp/pptinypose_infer.cc | 70 +++++++++++++++ .../paddleseg/sophgo/python/README.md | 2 +- .../PP_TinyPose_256x192_unquantized.yaml | 15 ++++ 6 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 examples/vision/keypointdetection/tiny_pose/rknpu2/README.md create mode 100644 examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/CMakeLists.txt create mode 100644 examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md create mode 100755 examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc create mode 100644 tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md b/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md new file mode 100644 index 0000000000..f7f270c0ac --- /dev/null +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md @@ -0,0 +1,55 @@ +[English](README.md) | 
简体中文 +# PP-TinyPose RKNPU2部署示例 + +## 模型版本说明 + +- [PaddleDetection release/2.5](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5) + +目前FastDeploy支持如下模型的部署 + +- [PP-TinyPose系列模型](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5/configs/keypoint/tiny_pose/README.md) + +## 准备PP-TinyPose部署模型 + +PP-TinyPose模型导出,请参考其文档说明[模型导出](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/deploy/EXPORT_MODEL.md) + +**注意**:PP-TinyPose导出的模型包含`model.pdmodel`、`model.pdiparams`和`infer_cfg.yml`三个文件,FastDeploy会从yaml文件中获取模型在推理时需要的预处理信息。 + +## 模型转换example + +### Paddle模型转换为ONNX模型 + +由于Rockchip提供的rknn-toolkit2工具暂时不支持Paddle模型直接导出为RKNN模型,因此需要先将Paddle模型导出为ONNX模型,再将ONNX模型转为RKNN模型。 + +```bash +# 下载Paddle静态图模型并解压 +wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz +tar -xvf PP_TinyPose_256x192_infer.tgz + +# 静态图转ONNX模型,注意,这里的save_file请和压缩包名对齐 +paddle2onnx --model_dir PP_TinyPose_256x192_infer \ + --model_filename model.pdmodel \ + --params_filename model.pdiparams \ + --save_file PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \ + --enable_dev_version True + +# 固定shape +python -m paddle2onnx.optimize --input_model PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \ + --output_model PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \ + --input_shape_dict "{'image':[1,3,256,192]}" +``` + +### ONNX模型转RKNN模型 + +为了方便大家使用,我们提供了python脚本,通过我们预配置的config文件,你将能够快速地转换ONNX模型到RKNN模型 + +```bash +python tools/rknpu2/export.py --config_path tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml \ + --target_platform rk3588 +``` + +## 详细部署文档 + +- [模型详细介绍](../README_CN.md) +- [Python部署](python) +- [C++部署](cpp) \ No newline at end of file diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/CMakeLists.txt b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/CMakeLists.txt new file mode 100644 index 0000000000..0f492f44b0 --- /dev/null +++ 
b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/CMakeLists.txt @@ -0,0 +1,13 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_tinypose_demo ${PROJECT_SOURCE_DIR}/pptinypose_infer.cc) +target_link_libraries(infer_tinypose_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md new file mode 100644 index 0000000000..5472295043 --- /dev/null +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md @@ -0,0 +1,85 @@ +[English](README.md) | 简体中文 +# PP-TinyPose C++部署示例 + +本目录下提供`pptinypose_infer.cc`快速完成PP-TinyPose通过NPU加速部署的`单图单人关键点检测`示例 +>> **注意**: PP-Tinypose单模型目前只支持单图单人关键点检测,因此输入的图片应只包含一个人或者进行过裁剪的图像。多人关键点检测请参考[PP-TinyPose Pipeline](../../../det_keypoint_unite/cpp/README.md) + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) + + +以Linux上推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本1.0.3以上(x.x.x>=1.0.3) + +```bash +mkdir build +cd build +# 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用 +wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz +tar xvf fastdeploy-linux-x64-x.x.x.tgz +cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x +make -j + +# 下载PP-TinyPose模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz +tar -xvf PP_TinyPose_256x192_infer.tgz +wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg + + +# CPU推理 +./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg +``` + +运行完成可视化结果如下图所示 +
+ +
+ +以上命令只适用于Linux或MacOS, Windows下SDK的使用方式请参考: +- [如何在Windows中使用FastDeploy C++ SDK](../../../../../docs/cn/faq/use_sdk_on_windows.md) + +## PP-TinyPose C++接口 + +### PP-TinyPose类 + +```c++ +fastdeploy::vision::keypointdetection::PPTinyPose( + const string& model_file, + const string& params_file = "", + const string& config_file, + const RuntimeOption& runtime_option = RuntimeOption(), + const ModelFormat& model_format = ModelFormat::PADDLE) +``` + +PPTinyPose模型加载和初始化,其中model_file为导出的Paddle模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **config_file**(str): 推理部署配置文件 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(ModelFormat): 模型格式,默认为Paddle格式 + +#### Predict函数 + +> ```c++ +> PPTinyPose::Predict(cv::Mat* im, KeyPointDetectionResult* result) +> ``` +> +> 模型预测接口,输入图像直接输出关键点检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 关键点检测结果,包括关键点的坐标以及关键点对应的概率值, KeyPointDetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 +#### 后处理参数 +> > * **use_dark**(bool): 是否使用DARK进行后处理[参考论文](https://arxiv.org/abs/1910.06278) + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) +- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc new file mode 100755 index 0000000000..3f8f66eb4b --- /dev/null +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc @@ -0,0 +1,70 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +void RKNPU2Infer(const std::string& tinypose_model_dir, + const std::string& image_file) { + auto tinypose_model_file = + tinypose_model_dir + "/picodet_s_416_coco_lcnet_rk3588.rknn"; + auto tinypose_params_file = ""; + auto tinypose_config_file = tinypose_model_dir + "infer_cfg.yml"; + auto option = fastdeploy::RuntimeOption(); + option.UseRKNPU2(); + auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose( + tinypose_model_file, tinypose_params_file, tinypose_config_file, option); + + if (!tinypose_model.Initialized()) { + std::cerr << "TinyPose Model Failed to initialize." << std::endl; + return; + } + + tinypose_model.DisablePermute(); + tinypose_model.DisableNormalize(); + + auto im = cv::imread(image_file); + fastdeploy::vision::KeyPointDetectionResult res; + if (!tinypose_model.Predict(&im, &res)) { + std::cerr << "TinyPose Prediction Failed." << std::endl; + return; + } else { + std::cout << "TinyPose Prediction Done!" 
<< std::endl; + } + + std::cout << res.Str() << std::endl; + + auto tinypose_vis_im = fastdeploy::vision::VisKeypointDetection(im, res, 0.5); + cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im); + std::cout << "TinyPose visualized result saved in ./tinypose_vis_result.jpg" + << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout << "Usage: infer_demo path/to/pptinypose_model_dir path/to/image " + "run_option, " + "e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu; 2: run with gpu and use tensorrt backend; 3: run " + "with kunlunxin." + << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + RKNPU2Infer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/segmentation/paddleseg/sophgo/python/README.md b/examples/vision/segmentation/paddleseg/sophgo/python/README.md index e646d6a903..55abb90f77 100644 --- a/examples/vision/segmentation/paddleseg/sophgo/python/README.md +++ b/examples/vision/segmentation/paddleseg/sophgo/python/README.md @@ -16,7 +16,7 @@ cd path/to/paddleseg/sophgo/python wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png # PaddleSeg模型转换为bmodel模型 -将Paddle模型转换为SOPHGO bmodel模型,转换步骤参考[文档](../README_CN.md#将paddleseg推理模型转换为bmodel模型步骤) +将Paddle模型转换为SOPHGO bmodel模型,转换步骤参考[文档](../README.md#将paddleseg推理模型转换为bmodel模型步骤) # 推理 python3 infer.py --model_file ./bmodel/pp_liteseg_1684x_f32.bmodel --config_file ./bmodel/deploy.yaml --image cityscapes_demo.png diff --git a/tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml b/tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml new file mode 100644 index 0000000000..28c2ab9436 --- /dev/null +++ b/tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml @@ -0,0 +1,15 @@ +mean: + - + - 123.675 + - 116.28 + - 103.53 +std: + - + - 58.395 + - 57.12 + - 57.375 +model_path: 
./PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx +outputs_nodes: ['conv2d_441.tmp_1'] +do_quantization: False +dataset: +output_folder: "./PP_TinyPose_256x192_infer" From 53333c5db6a1311f605c4c283224d62ecd513b19 Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 20:37:42 +0800 Subject: [PATCH 4/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0example=20=E5=92=8C?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E8=BD=AC=E6=8D=A2=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tiny_pose/rknpu2/cpp/README.md | 16 +++++++--------- .../tiny_pose/rknpu2/cpp/pptinypose_infer.cc | 19 +++++++------------ .../keypointdet/pptinypose/pptinypose.cc | 12 ++++++++++++ 3 files changed, 26 insertions(+), 21 deletions(-) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md index 5472295043..843ebe6829 100644 --- a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/README.md @@ -15,9 +15,7 @@ ```bash mkdir build cd build -# 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用 -wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz -tar xvf fastdeploy-linux-x64-x.x.x.tgz + cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x make -j @@ -27,8 +25,8 @@ tar -xvf PP_TinyPose_256x192_infer.tgz wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg -# CPU推理 -./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg +# NPU推理 +sudo ./infer_tinypose_demo ./PP_TinyPose_256x192_infer ./hrnet_demo.jpg ``` 运行完成可视化结果如下图所示 @@ -79,7 +77,7 @@ PPTinyPose模型加载和初始化,其中model_file为导出的Paddle模型格 #### 后处理参数 > > * **use_dark**(bool): 是否使用DARK进行后处理[参考论文](https://arxiv.org/abs/1910.06278) -- [模型介绍](../../) -- [Python部署](../python) -- [视觉模型预测结果](../../../../../docs/api/vision_results/) -- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md) +- [模型介绍](../../../) +- [Python部署](../../python) +- [视觉模型预测结果](../../../../../../docs/api/vision_results/) +- [如何切换模型推理后端引擎](../../../../../../docs/cn/faq/how_to_change_backend.md) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc index 3f8f66eb4b..f0b0a263ee 100755 --- a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc @@ -17,13 +17,14 @@ void RKNPU2Infer(const std::string& tinypose_model_dir, const std::string& image_file) { auto tinypose_model_file = - tinypose_model_dir + "/picodet_s_416_coco_lcnet_rk3588.rknn"; + tinypose_model_dir + "/PP_TinyPose_256x192_infer_rk3588_unquantized.rknn"; auto tinypose_params_file = ""; - auto tinypose_config_file = tinypose_model_dir + "infer_cfg.yml"; + auto tinypose_config_file = tinypose_model_dir + "/infer_cfg.yml"; auto option = fastdeploy::RuntimeOption(); option.UseRKNPU2(); auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose( - tinypose_model_file, tinypose_params_file, tinypose_config_file, option); + tinypose_model_file, tinypose_params_file, tinypose_config_file, option, + fastdeploy::RKNN); if 
(!tinypose_model.Initialized()) { std::cerr << "TinyPose Model Failed to initialize." << std::endl; @@ -51,20 +52,14 @@ void RKNPU2Infer(const std::string& tinypose_model_dir, } int main(int argc, char* argv[]) { - if (argc < 4) { + if (argc < 3) { std::cout << "Usage: infer_demo path/to/pptinypose_model_dir path/to/image " "run_option, " - "e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0" - << std::endl; - std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " - "with gpu; 2: run with gpu and use tensorrt backend; 3: run " - "with kunlunxin." + "e.g ./infer_model ./pptinypose_model_dir ./test.jpeg" << std::endl; return -1; } - if (std::atoi(argv[3]) == 0) { - RKNPU2Infer(argv[1], argv[2]); - } + RKNPU2Infer(argv[1], argv[2]); return 0; } diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc index d3668f8a5e..598d8ae2ba 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc @@ -139,6 +139,18 @@ bool PPTinyPose::Postprocess(std::vector& infer_result, "Only support batch = 1 in FastDeploy now."); result->Clear(); + if (infer_result.size() == 1) { + FDTensor result_copy = infer_result[0]; + std::cout << "Reshape result_copy!" << std::endl; + result_copy.Reshape({result_copy.shape[0], result_copy.shape[1], + result_copy.shape[2] * result_copy.shape[3]}); + std::cout << "Resize infer_result!" << std::endl; + infer_result.resize(2); + std::cout << "Do ArgMax!" << std::endl; + function::ArgMax(result_copy,&infer_result[1],-1); + std::cout << "Done!" 
<< std::endl; + } + // Calculate output length int outdata_size = std::accumulate(infer_result[0].shape.begin(), From 2b1631b563e2a9528c1b37962e3194fb4d65edb0 Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 20:55:26 +0800 Subject: [PATCH 5/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0pptinypose=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tiny_pose/rknpu2/cpp/pptinypose_infer.cc | 1 + fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc index f0b0a263ee..d3c7f7b8c6 100755 --- a/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/cpp/pptinypose_infer.cc @@ -18,6 +18,7 @@ void RKNPU2Infer(const std::string& tinypose_model_dir, const std::string& image_file) { auto tinypose_model_file = tinypose_model_dir + "/PP_TinyPose_256x192_infer_rk3588_unquantized.rknn"; + std::cout << tinypose_model_file << std::endl; auto tinypose_params_file = ""; auto tinypose_config_file = tinypose_model_dir + "/infer_cfg.yml"; auto option = fastdeploy::RuntimeOption(); diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc index 598d8ae2ba..f4b3bfc6ca 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc @@ -135,10 +135,12 @@ bool PPTinyPose::Postprocess(std::vector& infer_result, KeyPointDetectionResult* result, const std::vector& center, const std::vector& scale) { - FDASSERT(infer_result[1].shape[0] == 1, + FDASSERT(infer_result[0].shape[0] == 1, "Only support batch = 1 in FastDeploy now."); result->Clear(); + std::cout << "Postprocess" << std::endl; + 
std::cout << "infer_result.size() is " << infer_result.size() << std::endl; if (infer_result.size() == 1) { FDTensor result_copy = infer_result[0]; std::cout << "Reshape result_copy!" << std::endl; @@ -206,12 +208,14 @@ bool PPTinyPose::Predict(cv::Mat* im, KeyPointDetectionResult* result) { << ModelName() << "." << std::endl; return false; } + std::vector infer_result; if (!Infer(processed_data, &infer_result)) { FDERROR << "Failed to inference while using model:" << ModelName() << "." << std::endl; return false; } + if (!Postprocess(infer_result, result, center, scale)) { FDERROR << "Failed to postprocess while using model:" << ModelName() << "." << std::endl; From 8c42b708f6b17d5c36f5eb35d9dea56d6983fd23 Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 21:01:06 +0800 Subject: [PATCH 6/8] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=97=A0=E7=94=A8?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=EF=BC=8C=E6=9B=B4=E6=96=B0python=E8=84=9A?= =?UTF-8?q?=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tiny_pose/rknpu2/README.md | 4 +- .../tiny_pose/rknpu2/python/README_CN.md | 70 +++++++++++++++++++ .../rknpu2/python/pptinypose_infer.py | 50 +++++++++++++ .../keypointdet/pptinypose/pptinypose.cc | 6 -- 4 files changed, 122 insertions(+), 8 deletions(-) create mode 100644 examples/vision/keypointdetection/tiny_pose/rknpu2/python/README_CN.md create mode 100755 examples/vision/keypointdetection/tiny_pose/rknpu2/python/pptinypose_infer.py diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md b/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md index f7f270c0ac..100234ac88 100644 --- a/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/README.md @@ -51,5 +51,5 @@ python tools/rknpu2/export.py --config_path tools/rknpu2/config/PP_TinyPose_256x ## 详细部署文档 - [模型详细介绍](../README_CN.md) -- [Python部署](python) -- [C++部署](cpp) \ No 
newline at end of file +- [Python部署](./python) +- [C++部署](./cpp) \ No newline at end of file diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/python/README_CN.md b/examples/vision/keypointdetection/tiny_pose/rknpu2/python/README_CN.md new file mode 100644 index 0000000000..1a0f37d0bc --- /dev/null +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/python/README_CN.md @@ -0,0 +1,70 @@ +[English](README.md) | 简体中文 +# PP-TinyPose Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) + +本目录下提供`pptinypose_infer.py`快速完成PP-TinyPose在NPU加速部署的`单图单人关键点检测`示例。执行如下脚本即可完成 + +>> **注意**: PP-Tinypose单模型目前只支持单图单人关键点检测,因此输入的图片应只包含一个人或者进行过裁剪的图像。多人关键点检测请参考[PP-TinyPose Pipeline](../../../det_keypoint_unite/python/README.md) + +```bash +# 下载PP-TinyPose模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg + +# CPU推理 +python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg +``` + +运行完成可视化结果如下图所示 +
+ +
+ +## PP-TinyPose Python接口 + +```python +fd.vision.keypointdetection.PPTinyPose(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE) +``` + +PP-TinyPose模型加载和初始化,其中model_file, params_file以及config_file为训练模型导出的Paddle inference文件,具体请参考其文档说明[模型导出](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/deploy/EXPORT_MODEL.md) + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **config_file**(str): 推理部署配置文件 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(ModelFormat): 模型格式,默认为Paddle格式 + +### predict函数 + +> ```python +> PPTinyPose.predict(input_image) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **input_image**(np.ndarray): 输入数据,注意需为HWC,BGR格式 + +> **返回** +> +> > 返回`fastdeploy.vision.KeyPointDetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 +#### 后处理参数 +用户可按照自己的实际需求,修改下列后处理参数,从而影响最终的推理和部署效果 + +> > * **use_dark**(bool): 是否使用DARK进行后处理[参考论文](https://arxiv.org/abs/1910.06278) + + +## 其它文档 + +- [PP-TinyPose 模型介绍](..) 
+- [PP-TinyPose C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) +- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md) diff --git a/examples/vision/keypointdetection/tiny_pose/rknpu2/python/pptinypose_infer.py b/examples/vision/keypointdetection/tiny_pose/rknpu2/python/pptinypose_infer.py new file mode 100755 index 0000000000..a9e75dfa75 --- /dev/null +++ b/examples/vision/keypointdetection/tiny_pose/rknpu2/python/pptinypose_infer.py @@ -0,0 +1,50 @@ +import fastdeploy as fd +import cv2 +import os + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--tinypose_model_dir", + required=True, + help="path of paddletinypose model directory") + parser.add_argument( + "--image", required=True, help="path of test image file.") + return parser.parse_args() + + +def build_tinypose_option(args): + option = fd.RuntimeOption() + option.use_rknpu() + return option + + +args = parse_arguments() + +tinypose_model_file = os.path.join(args.tinypose_model_dir, "PP_TinyPose_256x192_infer_rk3588_unquantized.rknn") +tinypose_params_file = os.path.join(args.tinypose_model_dir, "") +tinypose_config_file = os.path.join(args.tinypose_model_dir, "infer_cfg.yml") +# 配置runtime,加载模型 +runtime_option = build_tinypose_option(args) +tinypose_model = fd.vision.keypointdetection.PPTinyPose( + tinypose_model_file, + tinypose_params_file, + tinypose_config_file, + runtime_option=runtime_option, + model_format=fd.ModelFormat.RKNN) +tinypose_model.disable_normalize() +tinypose_model.disable_permute() + +# 预测图片检测结果 +im = cv2.imread(args.image) +tinypose_result = tinypose_model.predict(im) +print("Paddle TinyPose Result:\n", tinypose_result) + +# 预测结果可视化 +vis_im = fd.vision.vis_keypoint_detection( + im, tinypose_result, conf_threshold=0.5) +cv2.imwrite("visualized_result.jpg", vis_im) +print("TinyPose visualized result save in ./visualized_result.jpg") diff --git 
a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc index f4b3bfc6ca..1d1ad5c3fc 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc @@ -139,18 +139,12 @@ bool PPTinyPose::Postprocess(std::vector& infer_result, "Only support batch = 1 in FastDeploy now."); result->Clear(); - std::cout << "Postprocess" << std::endl; - std::cout << "infer_result.size() is " << infer_result.size() << std::endl; if (infer_result.size() == 1) { FDTensor result_copy = infer_result[0]; - std::cout << "Reshape result_copy!" << std::endl; result_copy.Reshape({result_copy.shape[0], result_copy.shape[1], result_copy.shape[2] * result_copy.shape[3]}); - std::cout << "Resize infer_result!" << std::endl; infer_result.resize(2); - std::cout << "Do ArgMax!" << std::endl; function::ArgMax(result_copy,&infer_result[1],-1); - std::cout << "Done!" << std::endl; } // Calculate output length From 8faca05280e2f13ada67f81a354c8068a46e11ef Mon Sep 17 00:00:00 2001 From: Zheng-Bicheng Date: Wed, 15 Feb 2023 21:03:15 +0800 Subject: [PATCH 7/8] =?UTF-8?q?=E6=9B=B4=E6=96=B0pybind?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../vision/keypointdet/pptinypose/pptinypose_pybind.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose_pybind.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose_pybind.cc index 7fc6a2aabf..8f0634c85d 100644 --- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose_pybind.cc +++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose_pybind.cc @@ -36,6 +36,14 @@ void BindPPTinyPose(pybind11::module& m) { self.Predict(&mat, &res, detection_result); return res; }) + .def("disable_normalize", + [](vision::keypointdetection::PPTinyPose& self) { + self.DisableNormalize(); + }) + .def("disable_permute", + [](vision::keypointdetection::PPTinyPose& 
self) {
+             self.DisablePermute();
+           })
       .def_readwrite("use_dark",
                      &vision::keypointdetection::PPTinyPose::use_dark);
 }

From 2b7c23683a91932d8a20737cec2e2f5a337e073b Mon Sep 17 00:00:00 2001
From: Zheng-Bicheng
Date: Thu, 16 Feb 2023 10:38:10 +0800
Subject: [PATCH 8/8] =?UTF-8?q?=E6=8C=89=E7=85=A7=E8=A6=81=E6=B1=82?=
 =?UTF-8?q?=E6=9B=B4=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../vision/keypointdet/pptinypose/pptinypose.cc      |  2 +-
 .../vision/keypointdetection/pptinypose/__init__.py  | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
index 1d1ad5c3fc..c7360c9f3b 100644
--- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
+++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
@@ -144,7 +144,7 @@ bool PPTinyPose::Postprocess(std::vector<FDTensor>& infer_result,
     result_copy.Reshape({result_copy.shape[0], result_copy.shape[1],
                          result_copy.shape[2] * result_copy.shape[3]});
     infer_result.resize(2);
-    function::ArgMax(result_copy,&infer_result[1],-1);
+    function::ArgMax(result_copy, &infer_result[1], -1);
   }

   // Calculate output length
diff --git a/python/fastdeploy/vision/keypointdetection/pptinypose/__init__.py b/python/fastdeploy/vision/keypointdetection/pptinypose/__init__.py
index 6dfe58d983..679605a7c3 100644
--- a/python/fastdeploy/vision/keypointdetection/pptinypose/__init__.py
+++ b/python/fastdeploy/vision/keypointdetection/pptinypose/__init__.py
@@ -71,3 +71,15 @@ def use_dark(self, value):
         assert isinstance(
             value, bool), "The value to set `use_dark` must be type of bool."
         self._model.use_dark = value
+
+    def disable_normalize(self):
+        """
+        This function will disable normalize in preprocessing step.
+        """
+        self._model.disable_normalize()
+
+    def disable_permute(self):
+        """
+        This function will disable hwc2chw in preprocessing step.
+        """
+        self._model.disable_permute()