From 1c115bb2378767627a82e8703177e0f26859ab27 Mon Sep 17 00:00:00 2001
From: guxukai <44280887+GodIsBoom@users.noreply.github.com>
Date: Tue, 7 Feb 2023 14:05:08 +0800
Subject: [PATCH] [Model] Add facedet model: CenterFace (#1131)
* cpp example run success
* add landmarks
* fix reviewed problem
* add pybind
* add readme in examples
* fix reviewed problem
* new file: tests/models/test_centerface.py
* fix reviewed problem 230202
---
examples/vision/facedet/centerface/README.md | 25 +++
.../vision/facedet/centerface/README_CN.md | 24 +++
.../facedet/centerface/cpp/CMakeLists.txt | 14 ++
.../vision/facedet/centerface/cpp/README.md | 78 ++++++++++
.../facedet/centerface/cpp/README_CN.md | 77 ++++++++++
.../vision/facedet/centerface/cpp/infer.cc | 105 +++++++++++++
.../facedet/centerface/python/README.md | 75 +++++++++
.../facedet/centerface/python/README_CN.md | 74 +++++++++
.../vision/facedet/centerface/python/infer.py | 51 +++++++
fastdeploy/vision.h | 1 +
.../facedet/contrib/centerface/centerface.cc | 87 +++++++++++
.../facedet/contrib/centerface/centerface.h | 81 ++++++++++
.../contrib/centerface/centerface_pybind.cc | 85 +++++++++++
.../contrib/centerface/postprocessor.cc | 138 +++++++++++++++++
.../contrib/centerface/postprocessor.h | 67 ++++++++
.../contrib/centerface/preprocessor.cc | 80 ++++++++++
.../facedet/contrib/centerface/preprocessor.h | 61 ++++++++
fastdeploy/vision/facedet/facedet_pybind.cc | 2 +
python/fastdeploy/vision/facedet/__init__.py | 1 +
.../vision/facedet/contrib/centerface.py | 144 ++++++++++++++++++
tests/models/test_centerface.py | 99 ++++++++++++
21 files changed, 1369 insertions(+)
create mode 100644 examples/vision/facedet/centerface/README.md
create mode 100644 examples/vision/facedet/centerface/README_CN.md
create mode 100644 examples/vision/facedet/centerface/cpp/CMakeLists.txt
create mode 100644 examples/vision/facedet/centerface/cpp/README.md
create mode 100644 examples/vision/facedet/centerface/cpp/README_CN.md
create mode 100644 examples/vision/facedet/centerface/cpp/infer.cc
create mode 100644 examples/vision/facedet/centerface/python/README.md
create mode 100644 examples/vision/facedet/centerface/python/README_CN.md
create mode 100644 examples/vision/facedet/centerface/python/infer.py
mode change 100755 => 100644 fastdeploy/vision.h
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/centerface.cc
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/centerface.h
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/centerface_pybind.cc
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/postprocessor.cc
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/postprocessor.h
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/preprocessor.cc
create mode 100644 fastdeploy/vision/facedet/contrib/centerface/preprocessor.h
create mode 100644 python/fastdeploy/vision/facedet/contrib/centerface.py
create mode 100644 tests/models/test_centerface.py
diff --git a/examples/vision/facedet/centerface/README.md b/examples/vision/facedet/centerface/README.md
new file mode 100644
index 0000000000..7227096713
--- /dev/null
+++ b/examples/vision/facedet/centerface/README.md
@@ -0,0 +1,25 @@
+English | [简体中文](README_CN.md)
+
+# CenterFace Ready-to-deploy Model
+
+- The deployment of the CenterFace model is based on [CenterFace](https://github.com/Star-Clouds/CenterFace.git) and its [Pre-trained Model Based on WIDER FACE](https://github.com/Star-Clouds/CenterFace.git)
+  - (1) The *.onnx files provided by the [Official Repository](https://github.com/Star-Clouds/CenterFace.git) can be deployed directly;
+  - (2) The CenterFace training code is not open source, so users cannot train the model on their own data.
+
+
+## Download Pre-trained ONNX Model
+
+For developers' testing, the models exported by CenterFace are provided below and can be downloaded directly. (The accuracy in the table below comes from the official repository, evaluated on the WIDER FACE test set.)
+| Model | Size | Accuracy(Easy Set, Medium Set, Hard Set) | Note |
+|:---------------------------------------------------------------- |:----- |:----- |:---- |
+| [CenterFace](https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx) | 7.2MB | 93.2%,92.1%,87.3% | This model file is sourced from [CenterFace](https://github.com/Star-Clouds/CenterFace.git), MIT license |
+
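+For example, the model in the table above can be fetched directly with `wget`:
+
+```bash
+# Download the pre-trained CenterFace ONNX model
+wget https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx
+```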
+
+## Detailed Deployment Documents
+
+- [Python Deployment](python)
+- [C++ Deployment](cpp)
+
+## Release Note
+
+- This document and code are based on [CenterFace](https://github.com/Star-Clouds/CenterFace.git)
\ No newline at end of file
diff --git a/examples/vision/facedet/centerface/README_CN.md b/examples/vision/facedet/centerface/README_CN.md
new file mode 100644
index 0000000000..34c996fdbc
--- /dev/null
+++ b/examples/vision/facedet/centerface/README_CN.md
@@ -0,0 +1,24 @@
+[English](README.md) | 简体中文
+# CenterFace Ready-to-deploy Model
+
+- The deployment of the CenterFace model is based on [CenterFace](https://github.com/Star-Clouds/CenterFace.git) and its [Pre-trained Model Based on WIDER FACE](https://github.com/Star-Clouds/CenterFace.git)
+  - (1) The *.onnx files provided by the [Official Repository](https://github.com/Star-Clouds/CenterFace.git) can be deployed directly;
+  - (2) Since the CenterFace training code is not open source, developers cannot train the model on their own data
+
+
+## Download Pre-trained ONNX Model
+
+For developers' testing, the models exported by CenterFace are provided below and can be downloaded directly. (The accuracy in the table below comes from the official repository, evaluated on the WIDER FACE test set.)
+| Model | Size | Accuracy(Easy Set, Medium Set, Hard Set) | Note |
+|:---------------------------------------------------------------- |:----- |:----- |:---- |
+| [CenterFace](https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx) | 7.2MB | 93.2%,92.1%,87.3% | This model file is sourced from [CenterFace](https://github.com/Star-Clouds/CenterFace.git), MIT license |
+
+
+## Detailed Deployment Documents
+
+- [Python Deployment](python)
+- [C++ Deployment](cpp)
+
+## Release Note
+
+- This document and code are based on [CenterFace](https://github.com/Star-Clouds/CenterFace.git)
\ No newline at end of file
diff --git a/examples/vision/facedet/centerface/cpp/CMakeLists.txt b/examples/vision/facedet/centerface/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..9ba6687625
--- /dev/null
+++ b/examples/vision/facedet/centerface/cpp/CMakeLists.txt
@@ -0,0 +1,14 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Specifies the path to the fastdeploy library after you have downloaded it
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Include the FastDeploy dependency header file
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Add the FastDeploy library dependency
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/facedet/centerface/cpp/README.md b/examples/vision/facedet/centerface/cpp/README.md
new file mode 100644
index 0000000000..7c94c573a9
--- /dev/null
+++ b/examples/vision/facedet/centerface/cpp/README.md
@@ -0,0 +1,78 @@
+English | [简体中文](README_CN.md)
+# CenterFace C++ Deployment Example
+
+This directory provides an example in which `infer.cc` quickly finishes the deployment of CenterFace on CPU/GPU, as well as GPU deployment accelerated by TensorRT.
+
+Before deployment, confirm the following two steps
+
+- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+- 2. Download the precompiled deployment library and sample code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+
+Taking the CPU inference on Linux as an example, the compilation test can be completed by executing the following command in this directory.
+
+```bash
+mkdir build
+cd build
+# Download the FastDeploy precompiled library. Users can choose the appropriate version from the `FastDeploy Precompiled Library` mentioned above
+wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz # x.x.x > 1.0.4
+tar xvf fastdeploy-linux-x64-x.x.x.tgz # x.x.x > 1.0.4
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x # x.x.x > 1.0.4
+make -j
+
+# Download the official converted CenterFace model files and test images
+wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
+wget https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx
+
+# Use CenterFace.onnx model
+# CPU inference
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 0
+# GPU inference
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 1
+# TensorRT inference on GPU
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 2
+```
+
+The visualized result after running is as follows
+
+
+
+The above command works for Linux or macOS. For how to use the SDK on Windows, refer to:
+- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
+
+## CenterFace C++ Interface
+
+### CenterFace Class
+
+```c++
+fastdeploy::vision::facedet::CenterFace(
+ const string& model_file,
+ const string& params_file = "",
+ const RuntimeOption& runtime_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+```
+
+CenterFace model loading and initialization, where model_file is the path to the exported ONNX model
+
+**Parameter**
+
+> * **model_file**(str): Model file path
+> * **params_file**(str): Parameter file path. Pass an empty string when the model is in ONNX format
+> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, which is the default configuration
+> * **model_format**(ModelFormat): Model format. ONNX format by default
+
+#### Predict Function
+
+> ```c++
+> CenterFace::Predict(cv::Mat* im, FaceDetectionResult* result)
+> ```
+>
+> Model prediction interface. Takes an input image and returns detection results directly.
+>
+> **Parameter**
+>
+> > * **im**: Input image in HWC layout, BGR format
+> > * **result**: Detection results, including detection boxes and the confidence of each box. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of FaceDetectionResult
+
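+The postprocessing thresholds can be tuned through `GetPostprocessor()` before calling `Predict`. Below is a minimal usage sketch of the interface above; the threshold values are illustrative, not tuned recommendations:
+
+```c++
+#include <iostream>
+#include "fastdeploy/vision.h"
+
+int main() {
+  auto model = fastdeploy::vision::facedet::CenterFace("CenterFace.onnx");
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return -1;
+  }
+  // Defaults are conf_threshold = 0.5, nms_threshold = 0.3
+  model.GetPostprocessor().SetConfThreshold(0.4f);
+  model.GetPostprocessor().SetNMSThreshold(0.3f);
+
+  auto im = cv::imread("test_lite_face_detector_3.jpg");
+  fastdeploy::vision::FaceDetectionResult res;
+  if (!model.Predict(im, &res)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return -1;
+  }
+  std::cout << res.Str() << std::endl;
+  return 0;
+}
+```
+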
+- [Model Description](../../)
+- [Python Deployment](../python)
+- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/centerface/cpp/README_CN.md b/examples/vision/facedet/centerface/cpp/README_CN.md
new file mode 100644
index 0000000000..b9443271eb
--- /dev/null
+++ b/examples/vision/facedet/centerface/cpp/README_CN.md
@@ -0,0 +1,77 @@
+[English](README.md) | 简体中文
+# CenterFace C++ Deployment Example
+
+This directory provides an example in which `infer.cc` quickly finishes the deployment of CenterFace on CPU/GPU, as well as GPU deployment accelerated by TensorRT.
+
+Before deployment, confirm the following two steps
+
+- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. Download the precompiled deployment library and sample code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+Taking CPU inference on Linux as an example, execute the following commands in this directory to complete the compilation test
+
+```bash
+mkdir build
+cd build
+# Download the FastDeploy precompiled library. Users can choose the appropriate version from the `FastDeploy Precompiled Library` mentioned above
+wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz # x.x.x > 1.0.4
+tar xvf fastdeploy-linux-x64-x.x.x.tgz # x.x.x > 1.0.4
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x # x.x.x > 1.0.4
+make -j
+
+# Download the official converted CenterFace model files and test images
+wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
+wget https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx
+
+# Use the CenterFace.onnx model
+# CPU inference
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 0
+# GPU inference
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 1
+# TensorRT inference on GPU
+./infer_demo CenterFace.onnx test_lite_face_detector_3.jpg 2
+```
+
+The visualized result after running is as follows
+
+
+
+The above command works for Linux or macOS. For how to use the SDK on Windows, refer to:
+- [How to use FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
+
+## CenterFace C++ Interface
+
+### CenterFace Class
+
+```c++
+fastdeploy::vision::facedet::CenterFace(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const ModelFormat& model_format = ModelFormat::ONNX)
+```
+
+CenterFace model loading and initialization, where model_file is the path to the exported ONNX model.
+
+**Parameter**
+
+> * **model_file**(str): Model file path
+> * **params_file**(str): Parameter file path. Pass an empty string when the model is in ONNX format
+> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
+> * **model_format**(ModelFormat): Model format. ONNX format by default
+
+#### Predict Function
+
+> ```c++
+> CenterFace::Predict(cv::Mat* im, FaceDetectionResult* result)
+> ```
+>
+> Model prediction interface. Takes an input image and returns detection results directly.
+>
+> **Parameter**
+>
+> > * **im**: Input image in HWC layout, BGR format
+> > * **result**: Detection results, including detection boxes and the confidence of each box. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of FaceDetectionResult
+
+- [Model Description](../../)
+- [Python Deployment](../python)
+- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/centerface/cpp/infer.cc b/examples/vision/facedet/centerface/cpp/infer.cc
new file mode 100644
index 0000000000..1f4af84331
--- /dev/null
+++ b/examples/vision/facedet/centerface/cpp/infer.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+void CpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto model = fastdeploy::vision::facedet::CenterFace(model_file);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::facedet::CenterFace(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
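+  // The preprocessor resizes inputs to 640x640, so fix the TensorRT input shape to match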
+ option.SetTrtInputShape("images", {1, 3, 640, 640});
+ auto model = fastdeploy::vision::facedet::CenterFace(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+    std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
+                 "e.g ./infer_demo CenterFace.onnx ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu; 2: run with gpu and use tensorrt backend."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 2) {
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/facedet/centerface/python/README.md b/examples/vision/facedet/centerface/python/README.md
new file mode 100644
index 0000000000..7f7ecf97bf
--- /dev/null
+++ b/examples/vision/facedet/centerface/python/README.md
@@ -0,0 +1,75 @@
+English | [简体中文](README_CN.md)
+# CenterFace Python Deployment Example
+
+Before deployment, two steps require confirmation
+
+- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+- 2. Install FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+
+This directory provides an example in which `infer.py` quickly finishes the deployment of CenterFace on CPU/GPU, as well as GPU deployment accelerated by TensorRT. The script is as follows
+
+```bash
+# Download the example code for deployment
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/facedet/centerface/python/
+
+# Download CenterFace model files and test images
+wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
+wget https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx
+
+# Use CenterFace.onnx model
+# CPU inference
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device cpu
+# GPU inference
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device gpu
+# TensorRT inference on GPU
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device gpu --use_trt True
+```
+
+The visualized result after running is as follows
+
+
+
+## CenterFace Python Interface
+
+```python
+fastdeploy.vision.facedet.CenterFace(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
+```
+
+CenterFace model loading and initialization, where model_file is the path to the exported ONNX model
+
+**Parameter**
+
+> * **model_file**(str): Model file path
+> * **params_file**(str): Parameter file path. No need to set when the model is in ONNX format
+> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, which is the default configuration
+> * **model_format**(ModelFormat): Model format. ONNX format by default
+
+### predict function
+
+> ```python
+> CenterFace.predict(image_data)
+> ```
+>
+> Model prediction interface. Takes an input image and returns detection results directly.
+>
+> **Parameter**
+>
+> > * **image_data**(np.ndarray): Input image in HWC layout, BGR format
+
+
+> **Return**
+>
+> > Return `fastdeploy.vision.FaceDetectionResult` structure. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description.
+
+### Class Member Property
+#### Pre-processing Parameter
+Users can modify the following preprocessing parameters according to their needs, which affects the final inference and deployment results
+
+> > * **size**(list[int]): This parameter changes the target size used for resizing during preprocessing; it contains two integers representing [width, height]. Default is [640, 640]
+
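+A minimal sketch of the interface above, tuning the preprocessing size and postprocessing thresholds before prediction (the threshold values are illustrative, not tuned recommendations):
+
+```python
+import fastdeploy as fd
+import cv2
+
+model = fd.vision.facedet.CenterFace("CenterFace.onnx")
+# Resize target used in preprocessing, [width, height]
+model.preprocessor.size = [640, 640]
+# Postprocessing thresholds (defaults: conf_threshold 0.5, nms_threshold 0.3)
+model.postprocessor.conf_threshold = 0.4
+model.postprocessor.nms_threshold = 0.3
+
+im = cv2.imread("test_lite_face_detector_3.jpg")
+result = model.predict(im)
+print(result)
+```
+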
+## Other Documents
+
+- [CenterFace Model Description](..)
+- [CenterFace C++ Deployment](../cpp)
+- [Model Prediction Results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/centerface/python/README_CN.md b/examples/vision/facedet/centerface/python/README_CN.md
new file mode 100644
index 0000000000..6fb7f39097
--- /dev/null
+++ b/examples/vision/facedet/centerface/python/README_CN.md
@@ -0,0 +1,74 @@
+[English](README.md) | 简体中文
+# CenterFace Python Deployment Example
+
+Before deployment, confirm the following two steps
+
+- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. Install the FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+This directory provides an example in which `infer.py` quickly finishes the deployment of CenterFace on CPU/GPU, as well as GPU deployment accelerated by TensorRT. Execute the following script to finish the deployment
+
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/facedet/centerface/python/
+
+# Download CenterFace model files and test images
+wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
+wget https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx
+
+# Use the CenterFace.onnx model
+# CPU inference
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device cpu
+# GPU inference
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device gpu
+# TensorRT inference on GPU
+python infer.py --model CenterFace.onnx --image test_lite_face_detector_3.jpg --device gpu --use_trt True
+```
+
+The visualized result after running is as follows
+
+
+
+## CenterFace Python Interface
+
+```python
+fastdeploy.vision.facedet.CenterFace(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
+```
+
+CenterFace model loading and initialization, where model_file is the path to the exported ONNX model
+
+**Parameter**
+
+> * **model_file**(str): Model file path
+> * **params_file**(str): Parameter file path. There is no need to set it when the model is in ONNX format
+> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
+> * **model_format**(ModelFormat): Model format. ONNX format by default
+
+### predict function
+
+> ```python
+> CenterFace.predict(image_data)
+> ```
+>
+> Model prediction interface. Takes an input image and returns detection results directly.
+>
+> **Parameter**
+>
+> > * **image_data**(np.ndarray): Input image in HWC layout, BGR format
+
+> **Return**
+>
+> > Return `fastdeploy.vision.FaceDetectionResult` structure. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description
+
+### Class Member Property
+#### Pre-processing Parameter
+Users can modify the following preprocessing parameters according to their needs, which affects the final inference and deployment results
+
+> > * **size**(list[int]): This parameter changes the target size used for resizing during preprocessing; it contains two integers representing [width, height]. Default is [640, 640]
+
+## Other Documents
+
+- [CenterFace Model Description](..)
+- [CenterFace C++ Deployment](../cpp)
+- [Model Prediction Results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/centerface/python/infer.py b/examples/vision/facedet/centerface/python/infer.py
new file mode 100644
index 0000000000..39eeaf39e4
--- /dev/null
+++ b/examples/vision/facedet/centerface/python/infer.py
@@ -0,0 +1,51 @@
+import fastdeploy as fd
+import cv2
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model", required=True, help="Path of CenterFace onnx model.")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+        help="Whether to use TensorRT.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ option.set_trt_input_shape("images", [1, 3, 640, 640])
+ return option
+
+
+args = parse_arguments()
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.facedet.CenterFace(args.model, runtime_option=runtime_option)
+
+# Predict image detection results
+im = cv2.imread(args.image)
+result = model.predict(im)
+print(result)
+# Visualize prediction results
+vis_im = fd.vision.vis_face_detection(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
old mode 100755
new mode 100644
index 28721160c3..024302a56b
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -41,6 +41,7 @@
#include "fastdeploy/vision/facedet/contrib/ultraface.h"
#include "fastdeploy/vision/facedet/contrib/yolov5face.h"
#include "fastdeploy/vision/facedet/contrib/yolov7face/yolov7face.h"
+#include "fastdeploy/vision/facedet/contrib/centerface/centerface.h"
#include "fastdeploy/vision/facedet/ppdet/blazeface/blazeface.h"
#include "fastdeploy/vision/faceid/contrib/insightface/model.h"
#include "fastdeploy/vision/faceid/contrib/adaface/adaface.h"
diff --git a/fastdeploy/vision/facedet/contrib/centerface/centerface.cc b/fastdeploy/vision/facedet/contrib/centerface/centerface.cc
new file mode 100644
index 0000000000..3d17f19384
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/centerface.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/facedet/contrib/centerface/centerface.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+
+CenterFace::CenterFace(const std::string& model_file,
+ const std::string& params_file,
+ const RuntimeOption& custom_option,
+ const ModelFormat& model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ }
+ runtime_option = custom_option;
+ runtime_option.model_format = model_format;
+ runtime_option.model_file = model_file;
+ runtime_option.params_file = params_file;
+ initialized = Initialize();
+}
+
+bool CenterFace::Initialize() {
+  if (!InitRuntime()) {
+ FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+bool CenterFace::Predict(const cv::Mat& im, FaceDetectionResult* result) {
+  std::vector<FaceDetectionResult> results;
+ if (!BatchPredict({im}, &results)) {
+ return false;
+ }
+ *result = std::move(results[0]);
+ return true;
+}
+
+bool CenterFace::BatchPredict(const std::vector<cv::Mat>& images,
+                              std::vector<FaceDetectionResult>* results) {
+  std::vector<FDMat> fd_images = WrapMat(images);
+  FDASSERT(images.size() == 1, "Only support batch = 1 now.");
+  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
+ if (!preprocessor_.Run(&fd_images, &reused_input_tensors_, &ims_info)) {
+ FDERROR << "Failed to preprocess the input image." << std::endl;
+ return false;
+ }
+
+ reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+ if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+ FDERROR << "Failed to inference by runtime." << std::endl;
+ return false;
+ }
+
+  if (!postprocessor_.Run(reused_output_tensors_, results, ims_info)) {
+ FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace facedet
+
+} // namespace vision
+
+} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/facedet/contrib/centerface/centerface.h b/fastdeploy/vision/facedet/contrib/centerface/centerface.h
new file mode 100644
index 0000000000..8d14d52b9f
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/centerface.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+#include "fastdeploy/vision/facedet/contrib/centerface/preprocessor.h"
+#include "fastdeploy/vision/facedet/contrib/centerface/postprocessor.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+/*! @brief CenterFace model object used to load a CenterFace model exported by the CenterFace repository.
+ */
+class FASTDEPLOY_DECL CenterFace : public FastDeployModel {
+ public:
+ /** \brief Set path of model file and the configuration of runtime.
+ *
+ * \param[in] model_file Path of model file, e.g ./centerface.onnx
+   * \param[in] params_file Path of parameter file, e.g centerface/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ CenterFace(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX);
+
+  virtual std::string ModelName() const { return "centerface"; }
+
+ /** \brief Predict the detection result for an input image
+ *
+   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   * \param[in] result The output detection result will be written to this structure
+   * \return true if the prediction succeeded, otherwise false
+ */
+ virtual bool Predict(const cv::Mat& im, FaceDetectionResult* result);
+
+ /** \brief Predict the detection results for a batch of input images
+ *
+   * \param[in] images The input image list, each element comes from cv::imread()
+   * \param[in] results The output detection result list
+   * \return true if the prediction succeeded, otherwise false
+   */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& images,
+                            std::vector<FaceDetectionResult>* results);
+
+ /// Get preprocessor reference of CenterFace
+ virtual CenterFacePreprocessor& GetPreprocessor() {
+ return preprocessor_;
+ }
+
+ /// Get postprocessor reference of CenterFace
+ virtual CenterFacePostprocessor& GetPostprocessor() {
+ return postprocessor_;
+ }
+
+ protected:
+ bool Initialize();
+ CenterFacePreprocessor preprocessor_;
+ CenterFacePostprocessor postprocessor_;
+};
+
+} // namespace facedet
+
+} // namespace vision
+
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/facedet/contrib/centerface/centerface_pybind.cc b/fastdeploy/vision/facedet/contrib/centerface/centerface_pybind.cc
new file mode 100644
index 0000000000..586e427b97
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/centerface_pybind.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindCenterFace(pybind11::module& m) {
+  pybind11::class_<vision::facedet::CenterFacePreprocessor>(
+      m, "CenterFacePreprocessor")
+      .def(pybind11::init<>())
+      .def("run", [](vision::facedet::CenterFacePreprocessor& self, std::vector<pybind11::array>& im_list) {
+        std::vector<vision::FDMat> images;
+        for (size_t i = 0; i < im_list.size(); ++i) {
+          images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
+        }
+        std::vector<FDTensor> outputs;
+        std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
+ if (!self.Run(&images, &outputs, &ims_info)) {
+ throw std::runtime_error("Failed to preprocess the input data in CenterFacePreprocessor.");
+ }
+ for (size_t i = 0; i < outputs.size(); ++i) {
+ outputs[i].StopSharing();
+ }
+ return make_pair(outputs, ims_info);
+ })
+ .def_property("size", &vision::facedet::CenterFacePreprocessor::GetSize, &vision::facedet::CenterFacePreprocessor::SetSize);
+
+  pybind11::class_<vision::facedet::CenterFacePostprocessor>(
+      m, "CenterFacePostprocessor")
+      .def(pybind11::init<>())
+      .def("run", [](vision::facedet::CenterFacePostprocessor& self, std::vector<FDTensor>& inputs,
+                     const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
+        std::vector<vision::FaceDetectionResult> results;
+ if (!self.Run(inputs, &results, ims_info)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in CenterFacePostprocessor.");
+ }
+ return results;
+ })
+      .def("run", [](vision::facedet::CenterFacePostprocessor& self, std::vector<pybind11::array>& input_array,
+                     const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
+        std::vector<vision::FaceDetectionResult> results;
+        std::vector<FDTensor> inputs;
+ PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
+ if (!self.Run(inputs, &results, ims_info)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in CenterFacePostprocessor.");
+ }
+ return results;
+ })
+ .def_property("conf_threshold", &vision::facedet::CenterFacePostprocessor::GetConfThreshold, &vision::facedet::CenterFacePostprocessor::SetConfThreshold)
+ .def_property("nms_threshold", &vision::facedet::CenterFacePostprocessor::GetNMSThreshold, &vision::facedet::CenterFacePostprocessor::SetNMSThreshold);
+
+  pybind11::class_<vision::facedet::CenterFace, FastDeployModel>(m, "CenterFace")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
+ .def("predict",
+ [](vision::facedet::CenterFace& self, pybind11::array& data) {
+ auto mat = PyArrayToCvMat(data);
+ vision::FaceDetectionResult res;
+ self.Predict(mat, &res);
+ return res;
+ })
+      .def("batch_predict", [](vision::facedet::CenterFace& self, std::vector<pybind11::array>& data) {
+        std::vector<cv::Mat> images;
+        for (size_t i = 0; i < data.size(); ++i) {
+          images.push_back(PyArrayToCvMat(data[i]));
+        }
+        std::vector<vision::FaceDetectionResult> results;
+ self.BatchPredict(images, &results);
+ return results;
+ })
+ .def_property_readonly("preprocessor", &vision::facedet::CenterFace::GetPreprocessor)
+ .def_property_readonly("postprocessor", &vision::facedet::CenterFace::GetPostprocessor);
+}
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/facedet/contrib/centerface/postprocessor.cc b/fastdeploy/vision/facedet/contrib/centerface/postprocessor.cc
new file mode 100644
index 0000000000..98931956b6
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/postprocessor.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/facedet/contrib/centerface/postprocessor.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+
+CenterFacePostprocessor::CenterFacePostprocessor() {
+ conf_threshold_ = 0.5;
+ nms_threshold_ = 0.3;
+ landmarks_per_face_ = 5;
+}
+
+bool CenterFacePostprocessor::Run(const std::vector<FDTensor>& infer_result,
+    std::vector<FaceDetectionResult>* results,
+    const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
+ int batch = infer_result[0].shape[0];
+
+ results->resize(batch);
+ FDTensor heatmap = infer_result[0]; //(1 1 160 160)
+ FDTensor scales = infer_result[1]; //(1 2 160 160)
+ FDTensor offsets = infer_result[2]; //(1 2 160 160)
+ FDTensor landmarks = infer_result[3]; //(1 10 160 160)
+ for (size_t bs = 0; bs < batch; ++bs) {
+ (*results)[bs].Clear();
+ (*results)[bs].landmarks_per_face = landmarks_per_face_;
+ (*results)[bs].Reserve(heatmap.shape[2]);
+ if (infer_result[0].dtype != FDDataType::FP32) {
+ FDERROR << "Only support post process with float32 data." << std::endl;
+ return false;
+ }
+    int fea_h = heatmap.shape[2];
+    int fea_w = heatmap.shape[3];
+    int spatial_size = fea_w * fea_h;
+
+    float* heatmap_out = static_cast<float*>(heatmap.Data());
+
+    float* scale0 = static_cast<float*>(scales.Data());
+    float* scale1 = scale0 + spatial_size;
+
+    float* offset0 = static_cast<float*>(offsets.Data());
+    float* offset1 = offset0 + spatial_size;
+    float confidence = 0.f;
+
+    std::vector<int> ids;
+ for (int i = 0; i < fea_h; i++) {
+ for (int j = 0; j < fea_w; j++) {
+        if (heatmap_out[i * fea_w + j] > conf_threshold_) {
+ ids.push_back(i);
+ ids.push_back(j);
+ }
+ }
+ }
+
+ auto iter_out = ims_info[bs].find("output_shape");
+ auto iter_ipt = ims_info[bs].find("input_shape");
+ FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
+ "Cannot find input_shape or output_shape from im_info.");
+ float out_h = iter_out->second[0];
+ float out_w = iter_out->second[1];
+ float ipt_h = iter_ipt->second[0];
+ float ipt_w = iter_ipt->second[1];
+ float scale_h = ipt_h / out_h;
+ float scale_w = ipt_w / out_w;
+
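+    // The output feature maps are 1/4 the resolution of the network input
+    // (160x160 for a 640x640 input), hence the factor of 4 when mapping
+    // feature-map coordinates back to input pixels below.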
+    for (size_t i = 0; i < ids.size() / 2; i++) {
+ int id_h = ids[2 * i];
+ int id_w = ids[2 * i + 1];
+ int index = id_h * fea_w + id_w;
+ confidence = heatmap_out[index];
+
+ float s0 = std::exp(scale0[index]) * 4;
+ float s1 = std::exp(scale1[index]) * 4;
+ float o0 = offset0[index];
+ float o1 = offset1[index];
+
+      float x1 = (id_w + o1 + 0.5) * 4 - s1 / 2 > 0.f ? (id_w + o1 + 0.5) * 4 - s1 / 2 : 0;
+      float y1 = (id_h + o0 + 0.5) * 4 - s0 / 2 > 0 ? (id_h + o0 + 0.5) * 4 - s0 / 2 : 0;
+ float x2 = 0, y2 = 0;
+ x1 = x1 < (float)out_w ? x1 : (float)out_w;
+ y1 = y1 < (float)out_h ? y1 : (float)out_h;
+ x2 = x1 + s1 < (float)out_w ? x1 + s1 : (float)out_w;
+ y2 = y1 + s0 < (float)out_h ? y1 + s0 : (float)out_h;
+
+ (*results)[bs].boxes.emplace_back(std::array{x1, y1, x2, y2});
+ (*results)[bs].scores.push_back(confidence);
+ // decode landmarks (default 5 landmarks)
+ if (landmarks_per_face_ > 0) {
+ // reference: utils/box_utils.py#L241
+ for (size_t j = 0; j < landmarks_per_face_; j++) {
+          float* xmap = static_cast<float*>(landmarks.Data()) + (2 * j + 1) * spatial_size;
+          float* ymap = static_cast<float*>(landmarks.Data()) + (2 * j) * spatial_size;
+ float lx = (x1 + xmap[index] * s1) * scale_w;
+ float ly = (y1 + ymap[index] * s0) * scale_h;
+ (*results)[bs].landmarks.emplace_back(std::array{lx, ly});
+ }
+ }
+ }
+
+ if ((*results)[bs].boxes.size() == 0) {
+ return true;
+ }
+
+ utils::NMS(&((*results)[bs]), nms_threshold_);
+
+ for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
+ (*results)[bs].boxes[i][0] = std::max((*results)[bs].boxes[i][0] * scale_w, 0.0f);
+ (*results)[bs].boxes[i][1] = std::max((*results)[bs].boxes[i][1] * scale_h, 0.0f);
+ (*results)[bs].boxes[i][2] = std::max((*results)[bs].boxes[i][2] * scale_w, 0.0f);
+ (*results)[bs].boxes[i][3] = std::max((*results)[bs].boxes[i][3] * scale_h, 0.0f);
+ (*results)[bs].boxes[i][0] = std::min((*results)[bs].boxes[i][0], ipt_w - 1.0f);
+ (*results)[bs].boxes[i][1] = std::min((*results)[bs].boxes[i][1], ipt_h - 1.0f);
+ (*results)[bs].boxes[i][2] = std::min((*results)[bs].boxes[i][2], ipt_w - 1.0f);
+ (*results)[bs].boxes[i][3] = std::min((*results)[bs].boxes[i][3], ipt_h - 1.0f);
+ }
+ }
+ return true;
+}
+
+} // namespace facedet
+} // namespace vision
+} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/facedet/contrib/centerface/postprocessor.h b/fastdeploy/vision/facedet/contrib/centerface/postprocessor.h
new file mode 100644
index 0000000000..918b8ab1c3
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/postprocessor.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+
+class FASTDEPLOY_DECL CenterFacePostprocessor {
+ public:
+  /*! @brief Postprocessor object for CenterFace series model.
+   */
+  CenterFacePostprocessor();
+
+  /** \brief Process the result of runtime and fill to FaceDetectionResult structure
+   *
+   * \param[in] infer_result The inference result from runtime
+   * \param[in] results The output result of detection
+   * \param[in] ims_info The shape info list, records input_shape and output_shape
+   * \return true if the postprocess succeeded, otherwise false
+   */
+  bool Run(const std::vector<FDTensor>& infer_result,
+           std::vector<FaceDetectionResult>* results,
+           const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info);
+
+ /// Set conf_threshold, default 0.5
+ void SetConfThreshold(const float& conf_threshold) {
+ conf_threshold_ = conf_threshold;
+ }
+
+ /// Get conf_threshold, default 0.5
+ float GetConfThreshold() const { return conf_threshold_; }
+
+ /// Set nms_threshold, default 0.3
+ void SetNMSThreshold(const float& nms_threshold) {
+ nms_threshold_ = nms_threshold;
+ }
+
+ /// Get nms_threshold, default 0.3
+ float GetNMSThreshold() const { return nms_threshold_; }
+
+ protected:
+ float conf_threshold_;
+ float nms_threshold_;
+ int landmarks_per_face_;
+};
+
+} // namespace facedet
+} // namespace vision
+} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/facedet/contrib/centerface/preprocessor.cc b/fastdeploy/vision/facedet/contrib/centerface/preprocessor.cc
new file mode 100644
index 0000000000..ae3cacb8df
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/preprocessor.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/facedet/contrib/centerface/preprocessor.h"
+#include "fastdeploy/function/concat.h"
+#include "fastdeploy/vision/common/processors/mat.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+
+CenterFacePreprocessor::CenterFacePreprocessor() {
+ size_ = {640, 640};
+}
+
+bool CenterFacePreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+    std::vector<std::map<std::string, std::array<float, 2>>>* ims_info) {
+ if (images->size() == 0) {
+ FDERROR << "The size of input images should be greater than 0." << std::endl;
+ return false;
+ }
+ ims_info->resize(images->size());
+ outputs->resize(1);
+  std::vector<FDTensor> tensors(images->size());
+ for (size_t i = 0; i < images->size(); i++) {
+ if (!Preprocess(&(*images)[i], &tensors[i], &(*ims_info)[i])) {
+ FDERROR << "Failed to preprocess input image." << std::endl;
+ return false;
+ }
+ }
+
+ if (tensors.size() == 1) {
+ (*outputs)[0] = std::move(tensors[0]);
+ } else {
+ function::Concat(tensors, &((*outputs)[0]), 0);
+ }
+ return true;
+}
+
+bool CenterFacePreprocessor::Preprocess(FDMat* mat, FDTensor* output,
+    std::map<std::string, std::array<float, 2>>* im_info) {
+ // Record the shape of image and the shape of preprocessed image
+ (*im_info)["input_shape"] = {static_cast(mat->Height()),
+ static_cast(mat->Width())};
+
+ // centerface's preprocess steps
+ // 1. Resize
+ // 2. ConvertAndPermute
+ Resize::Run(mat, size_[0], size_[1]);
+ std::vector alpha = {1.0f, 1.0f, 1.0f};
+ std::vector beta = {0.0f, 0.0f, 0.0f};
+  ConvertAndPermute::Run(mat, alpha, beta, true);
+
+ // Record output shape of preprocessed image
+ (*im_info)["output_shape"] = {static_cast(mat->Height()),
+ static_cast(mat->Width())};
+
+ mat->ShareWithTensor(output);
+ output->ExpandDim(0);
+ return true;
+}
+
+} // namespace facedet
+
+} // namespace vision
+
+} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/facedet/contrib/centerface/preprocessor.h b/fastdeploy/vision/facedet/contrib/centerface/preprocessor.h
new file mode 100644
index 0000000000..a856306cbc
--- /dev/null
+++ b/fastdeploy/vision/facedet/contrib/centerface/preprocessor.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace facedet {
+
+class FASTDEPLOY_DECL CenterFacePreprocessor {
+ public:
+  /** \brief Create a preprocessor instance for CenterFace series model
+   */
+  CenterFacePreprocessor();
+
+  /** \brief Process the input image and prepare input tensors for runtime
+   *
+   * \param[in] images The input image data list, all the elements are returned by cv::imread()
+   * \param[in] outputs The output tensors which will feed in runtime
+   * \param[in] ims_info The shape info list, records input_shape and output_shape
+   * \return true if the preprocess succeeded, otherwise false
+   */
+  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+           std::vector<std::map<std::string, std::array<float, 2>>>* ims_info);
+
+ /// Set target size, tuple of (width, height), default size = {640, 640}
+  void SetSize(const std::vector<int>& size) { size_ = size; }
+
+ /// Get target size, tuple of (width, height), default size = {640, 640}
+  std::vector<int> GetSize() const { return size_; }
+
+
+ protected:
+  bool Preprocess(FDMat* mat, FDTensor* output,
+                  std::map<std::string, std::array<float, 2>>* im_info);
+
+ // target size, tuple of (width, height), default size = {640, 640}
+  std::vector<int> size_;
+
+};
+
+} // namespace facedet
+
+} // namespace vision
+
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/facedet/facedet_pybind.cc b/fastdeploy/vision/facedet/facedet_pybind.cc
index cf12399e2c..a36eb0b838 100644
--- a/fastdeploy/vision/facedet/facedet_pybind.cc
+++ b/fastdeploy/vision/facedet/facedet_pybind.cc
@@ -20,6 +20,7 @@ void BindRetinaFace(pybind11::module& m);
void BindUltraFace(pybind11::module& m);
void BindYOLOv5Face(pybind11::module& m);
void BindYOLOv7Face(pybind11::module& m);
+void BindCenterFace(pybind11::module& m);
void BindBlazeFace(pybind11::module& m);
void BindSCRFD(pybind11::module& m);
@@ -29,6 +30,7 @@ void BindFaceDet(pybind11::module& m) {
BindUltraFace(facedet_module);
BindYOLOv5Face(facedet_module);
BindYOLOv7Face(facedet_module);
+ BindCenterFace(facedet_module);
BindBlazeFace(facedet_module);
BindSCRFD(facedet_module);
}
diff --git a/python/fastdeploy/vision/facedet/__init__.py b/python/fastdeploy/vision/facedet/__init__.py
index a96cb791c8..d1c771c2b1 100644
--- a/python/fastdeploy/vision/facedet/__init__.py
+++ b/python/fastdeploy/vision/facedet/__init__.py
@@ -15,6 +15,7 @@
from __future__ import absolute_import
from .contrib.yolov5face import YOLOv5Face
from .contrib.yolov7face import *
+from .contrib.centerface import *
from .contrib.blazeface import *
from .contrib.retinaface import RetinaFace
from .contrib.scrfd import SCRFD
diff --git a/python/fastdeploy/vision/facedet/contrib/centerface.py b/python/fastdeploy/vision/facedet/contrib/centerface.py
new file mode 100644
index 0000000000..27a1397892
--- /dev/null
+++ b/python/fastdeploy/vision/facedet/contrib/centerface.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import FastDeployModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class CenterFacePreprocessor:
+ def __init__(self):
+ """Create a preprocessor for CenterFace
+ """
+ self._preprocessor = C.vision.facedet.CenterFacePreprocessor()
+
+ def run(self, input_ims):
+ """Preprocess input images for CenterFace
+
+        :param: input_ims: (list of numpy.ndarray)The input image list
+ :return: list of FDTensor
+ """
+ return self._preprocessor.run(input_ims)
+
+ @property
+ def size(self):
+ """
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+ """
+ return self._preprocessor.size
+
+ @size.setter
+ def size(self, wh):
+ assert isinstance(wh, (list, tuple)),\
+ "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements representing [width, height], but now it contains {} elements.".format(
+                len(wh))
+ self._preprocessor.size = wh
+
+
+class CenterFacePostprocessor:
+ def __init__(self):
+ """Create a postprocessor for CenterFace
+ """
+ self._postprocessor = C.vision.facedet.CenterFacePostprocessor()
+
+ def run(self, runtime_results, ims_info):
+ """Postprocess the runtime results for CenterFace
+
+ :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+ :param: ims_info: (list of dict)Record input_shape and output_shape
+        :return: list of FaceDetectionResult(If the runtime_results is predicted from batched samples, the length of this list equals to the batch size)
+ """
+ return self._postprocessor.run(runtime_results, ims_info)
+
+ @property
+ def conf_threshold(self):
+ """
+ confidence threshold for postprocessing, default is 0.5
+ """
+ return self._postprocessor.conf_threshold
+
+ @property
+ def nms_threshold(self):
+ """
+ nms threshold for postprocessing, default is 0.3
+ """
+ return self._postprocessor.nms_threshold
+
+ @conf_threshold.setter
+ def conf_threshold(self, conf_threshold):
+ assert isinstance(conf_threshold, float),\
+ "The value to set `conf_threshold` must be type of float."
+ self._postprocessor.conf_threshold = conf_threshold
+
+ @nms_threshold.setter
+ def nms_threshold(self, nms_threshold):
+ assert isinstance(nms_threshold, float),\
+ "The value to set `nms_threshold` must be type of float."
+ self._postprocessor.nms_threshold = nms_threshold
+
+
+class CenterFace(FastDeployModel):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a CenterFace model exported by CenterFace.
+
+ :param model_file: (str)Path of model file, e.g ./CenterFace.onnx
+        :param params_file: (str)Path of parameters file, e.g centerface/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
+ """
+ super(CenterFace, self).__init__(runtime_option)
+
+ self._model = C.vision.facedet.CenterFace(
+ model_file, params_file, self._runtime_option, model_format)
+
+ assert self.initialized, "CenterFace initialize failed."
+
+ def predict(self, input_image):
+ """Detect the location and key points of human faces from an input image
+ :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+ :return: FaceDetectionResult
+ """
+ return self._model.predict(input_image)
+
+ def batch_predict(self, images):
+        """Detect the location and key points of human faces from a batch of input images
+
+        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of FaceDetectionResult
+ """
+
+ return self._model.batch_predict(images)
+
+ @property
+ def preprocessor(self):
+ """Get CenterFacePreprocessor object of the loaded model
+
+ :return CenterFacePreprocessor
+ """
+ return self._model.preprocessor
+
+ @property
+ def postprocessor(self):
+ """Get CenterFacePostprocessor object of the loaded model
+
+ :return CenterFacePostprocessor
+ """
+ return self._model.postprocessor
diff --git a/tests/models/test_centerface.py b/tests/models/test_centerface.py
new file mode 100644
index 0000000000..9dbbff0c18
--- /dev/null
+++ b/tests/models/test_centerface.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from fastdeploy import ModelFormat
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+
+def test_facedet_centerface():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx"
+ input_url1 = "https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg"
+ result_url1 = "https://bj.bcebos.com/paddlehub/fastdeploy/centerface_result1.pkl"
+ fd.download(model_url, "resources")
+ fd.download(input_url1, "resources")
+ fd.download(result_url1, "resources")
+
+ model_file = "resources/CenterFace.onnx"
+ model = fd.vision.facedet.CenterFace(
+ model_file, runtime_option=rc.test_option)
+
+ with open("resources/centerface_result1.pkl", "rb") as f:
+ expect1 = pickle.load(f)
+
+ # compare diff
+ im1 = cv2.imread("./resources/test_lite_face_detector_3.jpg")
+ print(expect1)
+ for i in range(3):
+ # test single predict
+ result1 = model.predict(im1)
+
+ diff_boxes_1 = np.fabs(
+ np.array(result1.boxes) - np.array(expect1["boxes"]))
+ diff_scores_1 = np.fabs(
+ np.array(result1.scores) - np.array(expect1["scores"]))
+
+ assert diff_boxes_1.max(
+ ) < 1e-04, "There's difference in detection boxes 1."
+ assert diff_scores_1.max(
+ ) < 1e-05, "There's difference in detection score 1."
+
+def test_facedet_centerface_runtime():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/CenterFace.onnx"
+ input_url1 = "https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg"
+ result_url1 = "https://bj.bcebos.com/paddlehub/fastdeploy/centerface_result1.pkl"
+ fd.download(model_url, "resources")
+ fd.download(input_url1, "resources")
+ fd.download(result_url1, "resources")
+
+ model_file = "resources/CenterFace.onnx"
+
+ preprocessor = fd.vision.facedet.CenterFacePreprocessor()
+ postprocessor = fd.vision.facedet.CenterFacePostprocessor()
+
+ rc.test_option.set_model_path(model_file, model_format=ModelFormat.ONNX)
+ rc.test_option.use_openvino_backend()
+ runtime = fd.Runtime(rc.test_option)
+
+ with open("resources/centerface_result1.pkl", "rb") as f:
+ expect1 = pickle.load(f)
+
+ # compare diff
+ im1 = cv2.imread("./resources/test_lite_face_detector_3.jpg")
+
+ for i in range(3):
+ # test runtime
+ input_tensors, ims_info = preprocessor.run([im1.copy()])
+ output_tensors = runtime.infer({"input.1": input_tensors[0]})
+ results = postprocessor.run(output_tensors, ims_info)
+ result1 = results[0]
+
+ diff_boxes_1 = np.fabs(
+ np.array(result1.boxes) - np.array(expect1["boxes"]))
+ diff_scores_1 = np.fabs(
+ np.array(result1.scores) - np.array(expect1["scores"]))
+
+ assert diff_boxes_1.max(
+ ) < 1e-04, "There's difference in detection boxes 1."
+ assert diff_scores_1.max(
+ ) < 1e-05, "There's difference in detection score 1."
+
+
+if __name__ == "__main__":
+ test_facedet_centerface()
+ test_facedet_centerface_runtime()
\ No newline at end of file