[Doc] add doxygen docs for c sharp api (PaddlePaddle#1495)
add doxygen docs for c sharp api

Co-authored-by: DefTruth <[email protected]>
rainyfly and DefTruth authored Apr 9, 2023
1 parent ed19c75 commit fc15124
Showing 11 changed files with 3,201 additions and 36 deletions.
16 changes: 8 additions & 8 deletions csharp/fastdeploy/enum_varaibles.cs
@@ -24,20 +24,20 @@ public enum ModelFormat {
 }
 
 public enum rknpu2_CpuName {
-  RK356X = 0, /* run on RK356X. */
-  RK3588 = 1, /* default,run on RK3588. */
+  RK356X = 0, ///< run on RK356X.
+  RK3588 = 1, ///< default, run on RK3588.
   UNDEFINED,
 }
 
 public enum rknpu2_CoreMask {
-  RKNN_NPU_CORE_AUTO = 0, //< default, run on NPU core randomly.
-  RKNN_NPU_CORE_0 = 1, //< run on NPU core 0.
-  RKNN_NPU_CORE_1 = 2, //< run on NPU core 1.
-  RKNN_NPU_CORE_2 = 4, //< run on NPU core 2.
+  RKNN_NPU_CORE_AUTO = 0, ///< default, run on NPU core randomly.
+  RKNN_NPU_CORE_0 = 1, ///< run on NPU core 0.
+  RKNN_NPU_CORE_1 = 2, ///< run on NPU core 1.
+  RKNN_NPU_CORE_2 = 4, ///< run on NPU core 2.
   RKNN_NPU_CORE_0_1 =
-      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, //< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, ///< run on NPU core 0 and core 1.
   RKNN_NPU_CORE_0_1_2 =
-      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, //< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, ///< run on NPU cores 0, 1 and 2.
   RKNN_NPU_CORE_UNDEFINED,
 }

54 changes: 54 additions & 0 deletions csharp/fastdeploy/runtime_option.cs
@@ -18,6 +18,8 @@

namespace fastdeploy {

/*! @brief Option object used when creating a new Runtime object
 */
public class RuntimeOption {

public RuntimeOption() {
@@ -28,41 +30,80 @@ public RuntimeOption() {
FD_C_DestroyRuntimeOptionWrapper(fd_runtime_option_wrapper);
}

/** \brief Set the paths of the model file and the parameter file
 *
 * \param[in] model_path Path of the model file, e.g. ResNet50/model.pdmodel for a Paddle format model or ResNet50/model.onnx for an ONNX format model
 * \param[in] params_path Path of the parameter file; only used when the model format is Paddle, e.g. ResNet50/model.pdiparams
 * \param[in] format Format of the loaded model
 */
public void SetModelPath(string model_path, string params_path = "",
ModelFormat format = ModelFormat.PADDLE) {
FD_C_RuntimeOptionWrapperSetModelPath(fd_runtime_option_wrapper, model_path,
params_path, format);
}
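
For reference, a minimal usage sketch of the call documented above (an editor's illustration, not part of this diff; the paths are placeholders, and ModelFormat.ONNX is assumed to exist as the comment above implies):

  using fastdeploy;

  var option = new RuntimeOption();
  // Paddle format: pass both the model file and the parameter file.
  option.SetModelPath("ResNet50/model.pdmodel", "ResNet50/model.pdiparams",
                      ModelFormat.PADDLE);
  // ONNX format: the parameter path can stay empty.
  // option.SetModelPath("ResNet50/model.onnx", "", ModelFormat.ONNX);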

/** \brief Specify the memory buffers of the model and the parameters; used when the model and parameters are loaded directly from memory
 *
 * \param[in] model_buffer The model's memory buffer as a string
 * \param[in] params_buffer The parameters' memory buffer as a string
 * \param[in] format Format of the loaded model
 */
public void SetModelBuffer(string model_buffer, string params_buffer = "",
ModelFormat format = ModelFormat.PADDLE) {
FD_C_RuntimeOptionWrapperSetModelBuffer(
fd_runtime_option_wrapper, model_buffer, params_buffer, format);
}
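
A hedged sketch of the in-memory variant (not part of this diff; the binding packs raw model bytes into .NET strings, so a Latin-1 round trip is assumed here to keep the bytes intact):

  using System.IO;
  using System.Text;
  using fastdeploy;

  // Hypothetical paths; read the files and pass their contents as strings.
  string modelBuffer = Encoding.Latin1.GetString(File.ReadAllBytes("ResNet50/model.pdmodel"));
  string paramsBuffer = Encoding.Latin1.GetString(File.ReadAllBytes("ResNet50/model.pdiparams"));
  var option = new RuntimeOption();
  option.SetModelBuffer(modelBuffer, paramsBuffer, ModelFormat.PADDLE);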

/// Use CPU for inference; the runtime runs on CPU by default
public void UseCpu() {
FD_C_RuntimeOptionWrapperUseCpu(fd_runtime_option_wrapper);
}

/// Use an NVIDIA GPU for inference
public void UseGpu(int gpu_id = 0) {
FD_C_RuntimeOptionWrapperUseGpu(fd_runtime_option_wrapper, gpu_id);
}
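
A quick device-selection sketch (editor's illustration, not part of this diff):

  var option = new RuntimeOption();
  option.UseGpu(0);    // infer on NVIDIA GPU 0
  // option.UseCpu();  // or stay on the CPU (the default)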

/// Use RKNPU2 (e.g. RK3588/RK356X) for inference
public void
UseRKNPU2(rknpu2_CpuName rknpu2_name = rknpu2_CpuName.RK3588,
rknpu2_CoreMask rknpu2_core = rknpu2_CoreMask.RKNN_NPU_CORE_0) {
FD_C_RuntimeOptionWrapperUseRKNPU2(fd_runtime_option_wrapper, rknpu2_name,
rknpu2_core);
}
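
The rknpu2 enums revised at the top of this commit plug in here; a brief sketch (editor's illustration, not part of this diff):

  var option = new RuntimeOption();
  // The core-mask values are bit flags, so RKNN_NPU_CORE_0_1 pins
  // inference to NPU cores 0 and 1 of an RK3588.
  option.UseRKNPU2(rknpu2_CpuName.RK3588, rknpu2_CoreMask.RKNN_NPU_CORE_0_1);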

/// Use TimVX (e.g. RV1126/A311D) for inference
public void UseTimVX() {
FD_C_RuntimeOptionWrapperUseTimVX(fd_runtime_option_wrapper);
}

/// Use Huawei Ascend for inference
public void UseAscend() {
FD_C_RuntimeOptionWrapperUseAscend(fd_runtime_option_wrapper);
}

/// \brief Turn on KunlunXin XPU.
///
/// \param kunlunxin_id The KunlunXin XPU card to use (default is 0).
/// \param l3_workspace_size The size of the workspace allocated from the L3
///        cache; the maximum is 16 MB.
/// \param locked Whether the allocated L3 cache can be locked. If false,
///        the L3 cache is not locked and can be shared by multiple models,
///        in which case the models sharing it are executed sequentially on
///        the card.
/// \param autotune Whether to autotune the conv operators in the model. If
///        true, when a conv operator of a given shape is executed for the
///        first time, a better algorithm is searched for automatically to
///        speed up subsequent conv operators of the same shape.
/// \param autotune_file Path of the autotune file. If set, the algorithms
///        recorded in the file are used and autotuning is not performed
///        again.
/// \param precision Computation precision of multi_encoder.
/// \param adaptive_seqlen Whether the input of multi_encoder is variable
///        length.
/// \param enable_multi_stream Whether to enable multi-stream on the
///        KunlunXin XPU.
///
public void
UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,
bool locked = false, bool autotune = true,
@@ -74,6 +115,7 @@ public void
enable_multi_stream);
}
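
A sketch of the call using the four leading parameters documented above; the remaining parameters keep their defaults (editor's illustration, not part of this diff):

  var option = new RuntimeOption();
  // Card 0, ~16 MB L3 workspace, cache not locked, conv autotune on.
  option.UseKunlunXin(0, 0xfffc00, false, true);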

/// Use Sophgo for inference
public void UseSophgo() {
FD_C_RuntimeOptionWrapperUseSophgo(fd_runtime_option_wrapper);
}
@@ -83,6 +125,9 @@ public void SetExternalStream(IntPtr external_stream) {
external_stream);
}

/** \brief Set the number of CPU threads used for inference on CPU; by default it is decided by the backend
 */
public void SetCpuThreadNum(int thread_num) {
FD_C_RuntimeOptionWrapperSetCpuThreadNum(fd_runtime_option_wrapper,
thread_num);
@@ -97,38 +142,47 @@ public void UsePaddleBackend() {
FD_C_RuntimeOptionWrapperUsePaddleBackend(fd_runtime_option_wrapper);
}

/// Set Paddle Inference as the inference backend; supports CPU/GPU
public void UsePaddleInferBackend() {
FD_C_RuntimeOptionWrapperUsePaddleInferBackend(fd_runtime_option_wrapper);
}

/// Set ONNX Runtime as the inference backend; supports CPU/GPU
public void UseOrtBackend() {
FD_C_RuntimeOptionWrapperUseOrtBackend(fd_runtime_option_wrapper);
}

/// Set SOPHGO Runtime as the inference backend; only supports SOPHGO devices
public void UseSophgoBackend() {
FD_C_RuntimeOptionWrapperUseSophgoBackend(fd_runtime_option_wrapper);
}

/// Set TensorRT as the inference backend; only supports GPU
public void UseTrtBackend() {
FD_C_RuntimeOptionWrapperUseTrtBackend(fd_runtime_option_wrapper);
}

/// Set Poros as the inference backend; supports CPU/GPU
public void UsePorosBackend() {
FD_C_RuntimeOptionWrapperUsePorosBackend(fd_runtime_option_wrapper);
}

/// Set OpenVINO as the inference backend; only supports CPU
public void UseOpenVINOBackend() {
FD_C_RuntimeOptionWrapperUseOpenVINOBackend(fd_runtime_option_wrapper);
}

/// Set Paddle Lite as the inference backend; only supports Arm CPU
public void UseLiteBackend() {
FD_C_RuntimeOptionWrapperUseLiteBackend(fd_runtime_option_wrapper);
}

/// Set Paddle Lite as the inference backend; only supports Arm CPU
public void UsePaddleLiteBackend() {
FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(fd_runtime_option_wrapper);
}
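
Device and backend choices combine; a sketch (editor's illustration, not part of this diff):

  var option = new RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();          // TensorRT, GPU only
  // Or, on CPU:
  // option.UseCpu();
  // option.UseOpenVINOBackend();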


public void SetPaddleMKLDNN(bool pd_mkldnn = true) {
FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(fd_runtime_option_wrapper,
pd_mkldnn);
Expand Down
25 changes: 24 additions & 1 deletion csharp/fastdeploy/vision/classification/ppcls/model.cs
@@ -23,8 +23,18 @@ namespace fastdeploy {
namespace vision {
namespace classification {

/*! @brief PaddleClas series model object, used to load a model exported by the PaddleClas repository
 */
public class PaddleClasModel {

/** \brief Set the paths of the model file and the configuration file, and the runtime configuration
 *
 * \param[in] model_file Path of the model file, e.g. resnet/model.pdmodel
 * \param[in] params_file Path of the parameter file, e.g. resnet/model.pdiparams; ignored when the model format is ONNX
 * \param[in] config_file Path of the configuration file for deployment, e.g. resnet/infer_cfg.yml
 * \param[in] custom_option RuntimeOption for inference; by default CPU is used, with the backend chosen from `valid_cpu_backends`
 * \param[in] model_format Format of the loaded model; default is the Paddle format
 */
public PaddleClasModel(string model_file, string params_file,
string config_file, RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
@@ -40,11 +50,17 @@ public PaddleClasModel(string model_file, string params_file,
FD_C_DestroyPaddleClasModelWrapper(fd_paddleclas_model_wrapper);
}


/// Get the model's name
public string ModelName() {
return "PaddleClas/Model";
}
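
Putting the pieces together, a hedged construction sketch (editor's illustration, not part of this diff; paths are placeholders):

  using fastdeploy;
  using fastdeploy.vision.classification;

  var option = new RuntimeOption();
  option.UseCpu();
  var model = new PaddleClasModel("resnet/model.pdmodel",
                                  "resnet/model.pdiparams",
                                  "resnet/infer_cfg.yml", option);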

/** \brief DEPRECATED Predict the classification result for an input image; will be removed in version 1.0
 *
 * \param[in] img The input image data, as returned by cv::imread()
 *
 * \return ClassifyResult
 */
public ClassifyResult Predict(Mat img) {
FD_ClassifyResult fd_classify_result = new FD_ClassifyResult();
if(! FD_C_PaddleClasModelWrapperPredict(
@@ -59,6 +75,12 @@ public ClassifyResult Predict(Mat img) {
return classify_result;
}
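
Continuing the construction sketch above, single-image prediction might look like this (assuming, as in the FastDeploy C# examples, that Mat comes from OpenCvSharp and ClassifyResult lives in fastdeploy.vision; the image path is a placeholder):

  using fastdeploy.vision;
  using OpenCvSharp;

  Mat img = Cv2.ImRead("ILSVRC2012_val_00000010.jpeg");
  ClassifyResult result = model.Predict(img);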

/** \brief Predict the classification results for a batch of input images
 *
 * \param[in] imgs The input image list; each element comes from cv::imread()
 *
 * \return List<ClassifyResult>
 */
public List<ClassifyResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -86,6 +108,7 @@ public List<ClassifyResult> BatchPredict(List<Mat> imgs){
return results_out;
}
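
And batch prediction over several images, under the same assumptions as the single-image sketch above:

  using System.Collections.Generic;
  using OpenCvSharp;

  var imgs = new List<Mat> { Cv2.ImRead("a.jpg"), Cv2.ImRead("b.jpg") };
  List<ClassifyResult> results = model.BatchPredict(imgs);
  // One ClassifyResult per input image, in order; check model.Initialized()
  // before predicting if construction might have failed.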

/// Check whether the model is initialized successfully
public bool Initialized() {
return FD_C_PaddleClasModelWrapperInitialized(fd_paddleclas_model_wrapper);
}