Skip to content

Commit a51e5a6

Browse files
authored
[Android] Add android aar package (#416)
* [Android] Add Android build docs and demo (#26) * [Backend] Add override flag to lite backend * [Docs] Add Android C++ SDK build docs * [Doc] fix android_build_docs typos * Update CMakeLists.txt * Update android.md * [Doc] Add PicoDet Android demo docs * [Doc] Update PicoDet Android demo docs * [Doc] Update PaddleClasModel Android demo docs * [Doc] Update fastdeploy android jni docs * [Doc] Update fastdeploy android jni usage docs * [Android] init fastdeploy android jar package * [Backend] support int8 option for lite backend * [Model] add Backend::Lite to paddle model * [Backend] use CopyFromCpu for lite backend. * [Android] package jni srcs and java api into aar * Update infer.cc * Update infer.cc * [Android] Update package build.gradle * [Android] Update android app examples * [Android] update android detection app
1 parent b064ddf commit a51e5a6

File tree

137 files changed

+4664
-37
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

137 files changed

+4664
-37
lines changed

examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::FasterRCNN(
28-
model_file, params_file, config_file);
30+
model_file, params_file, config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_mask_rcnn.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::MaskRCNN(model_file, params_file,
28-
config_file);
30+
config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_picodet.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file,
28-
config_file);
30+
config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::PPYOLO(model_file, params_file,
28-
config_file);
30+
config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
28-
config_file);
30+
config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_yolov3.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::YOLOv3(model_file, params_file,
28-
config_file);
30+
config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/detection/paddledetection/cpp/infer_yolox.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "infer_cfg.yml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::detection::PaddleYOLOX(
28-
model_file, params_file, config_file);
30+
model_file, params_file, config_file, option);
2931
if (!model.Initialized()) {
3032
std::cerr << "Failed to initialize." << std::endl;
3133
return;

examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,10 @@ void CpuInfer(const std::string& tinypose_model_dir,
2525
auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
2626
auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
2727
auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
28+
auto option = fastdeploy::RuntimeOption();
29+
option.UseCpu();
2830
auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
29-
tinypose_model_file, tinypose_params_file, tinypose_config_file);
31+
tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
3032
if (!tinypose_model.Initialized()) {
3133
std::cerr << "TinyPose Model Failed to initialize." << std::endl;
3234
return;

examples/vision/matting/ppmatting/cpp/infer.cc

+1
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file,
2626
auto params_file = model_dir + sep + "model.pdiparams";
2727
auto config_file = model_dir + sep + "deploy.yaml";
2828
auto option = fastdeploy::RuntimeOption();
29+
option.UseCpu();
2930
auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
3031
config_file, option);
3132
if (!model.Initialized()) {

examples/vision/segmentation/paddleseg/cpp/infer.cc

+3-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
2424
auto model_file = model_dir + sep + "model.pdmodel";
2525
auto params_file = model_dir + sep + "model.pdiparams";
2626
auto config_file = model_dir + sep + "deploy.yaml";
27+
auto option = fastdeploy::RuntimeOption();
28+
option.UseCpu();
2729
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
28-
model_file, params_file, config_file);
30+
model_file, params_file, config_file, option);
2931

3032
if (!model.Initialized()) {
3133
std::cerr << "Failed to initialize." << std::endl;

fastdeploy/backends/lite/lite_backend.cc

+32-12
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,11 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
4242
void LiteBackend::BuildOption(const LiteBackendOption& option) {
4343
option_ = option;
4444
std::vector<paddle::lite_api::Place> valid_places;
45-
if (option.enable_fp16) {
45+
if (option_.enable_int8) {
46+
valid_places.push_back(
47+
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
48+
}
49+
if (option_.enable_fp16) {
4650
paddle::lite_api::MobileConfig check_fp16_config;
4751
// Determine whether the device supports the FP16
4852
// instruction set (or whether it is an arm device
@@ -58,12 +62,12 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
5862
valid_places.push_back(
5963
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
6064
config_.set_valid_places(valid_places);
61-
if (option.threads > 0) {
62-
config_.set_threads(option.threads);
65+
if (option_.threads > 0) {
66+
config_.set_threads(option_.threads);
6367
}
64-
if (option.power_mode > 0) {
68+
if (option_.power_mode > 0) {
6569
config_.set_power_mode(
66-
static_cast<paddle::lite_api::PowerMode>(option.power_mode));
70+
static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
6771
}
6872
}
6973

@@ -136,14 +140,13 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
136140
std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }
137141

138142
bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
139-
std::vector<FDTensor>* outputs) {
143+
std::vector<FDTensor>* outputs) {
140144
if (inputs.size() != inputs_desc_.size()) {
141145
FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
142146
<< ") should keep same with the inputs of this model("
143147
<< inputs_desc_.size() << ")." << std::endl;
144148
return false;
145149
}
146-
147150
for (size_t i = 0; i < inputs.size(); ++i) {
148151
auto iter = inputs_order_.find(inputs[i].name);
149152
if (iter == inputs_order_.end()) {
@@ -152,12 +155,29 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
152155
return false;
153156
}
154157
auto tensor = predictor_->GetInput(iter->second);
155-
tensor->Resize(inputs[i].shape);
156-
tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
157-
inputs[i].Nbytes(),
158-
paddle::lite_api::TargetType::kARM);
158+
// Adjust dims only, allocate lazy.
159+
tensor->Resize(inputs[i].shape);
160+
if (inputs[i].dtype == FDDataType::FP32) {
161+
tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kARM>(
162+
reinterpret_cast<const float*>(const_cast<void*>(
163+
inputs[i].CpuData())));
164+
} else if (inputs[i].dtype == FDDataType::INT32) {
165+
tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kARM>(
166+
reinterpret_cast<const int*>(const_cast<void*>(
167+
inputs[i].CpuData())));
168+
} else if (inputs[i].dtype == FDDataType::INT8) {
169+
tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kARM>(
170+
reinterpret_cast<const int8_t*>(const_cast<void*>(
171+
inputs[i].CpuData())));
172+
} else if (inputs[i].dtype == FDDataType::UINT8) {
173+
tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
174+
reinterpret_cast<const uint8_t*>(const_cast<void*>(
175+
inputs[i].CpuData())));
176+
} else {
177+
FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
178+
}
159179
}
160-
180+
161181
predictor_->Run();
162182

163183
outputs->resize(outputs_desc_.size());

fastdeploy/backends/lite/lite_backend.h

+2
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,8 @@ struct LiteBackendOption {
3737
int power_mode = 3;
3838
// enable fp16
3939
bool enable_fp16 = false;
40+
// enable int8
41+
bool enable_int8 = false;
4042
// optimized model dir for CxxConfig
4143
std::string optimized_model_dir = "";
4244
// TODO(qiuyanjun): support more options for lite backend.

fastdeploy/runtime.cc

+12-1
Original file line numberDiff line numberDiff line change
@@ -321,7 +321,17 @@ void RuntimeOption::EnableLiteFP16() {
321321
lite_enable_fp16 = true;
322322
}
323323

324-
void RuntimeOption::DisableLiteFP16() { lite_enable_fp16 = false; }
324+
void RuntimeOption::DisableLiteFP16() {
325+
lite_enable_fp16 = false;
326+
}
327+
328+
void RuntimeOption::EnableLiteInt8() {
329+
lite_enable_int8 = true;
330+
}
331+
332+
void RuntimeOption::DisableLiteInt8() {
333+
lite_enable_int8 = false;
334+
}
325335

326336
void RuntimeOption::SetLitePowerMode(LitePowerMode mode) {
327337
lite_power_mode = mode;
@@ -650,6 +660,7 @@ void Runtime::CreateLiteBackend() {
650660
#ifdef ENABLE_LITE_BACKEND
651661
auto lite_option = LiteBackendOption();
652662
lite_option.threads = option.cpu_thread_num;
663+
lite_option.enable_int8 = option.lite_enable_int8;
653664
lite_option.enable_fp16 = option.lite_enable_fp16;
654665
lite_option.power_mode = static_cast<int>(option.lite_power_mode);
655666
lite_option.optimized_model_dir = option.lite_optimized_model_dir;

fastdeploy/runtime.h

+12
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,16 @@ struct FASTDEPLOY_DECL RuntimeOption {
173173
*/
174174
void DisableLiteFP16();
175175

176+
/**
177+
* @brief enable int8 precision while use paddle lite backend
178+
*/
179+
void EnableLiteInt8();
180+
181+
/**
182+
* @brief disable int8 precision, change to full precision(float32)
183+
*/
184+
void DisableLiteInt8();
185+
176186
/**
177187
* @brief Set power mode while using Paddle Lite as inference backend, mode(0: LITE_POWER_HIGH; 1: LITE_POWER_LOW; 2: LITE_POWER_FULL; 3: LITE_POWER_NO_BIND, 4: LITE_POWER_RAND_HIGH; 5: LITE_POWER_RAND_LOW, refer [paddle lite](https://paddle-lite.readthedocs.io/zh/latest/api_reference/cxx_api_doc.html#set-power-mode) for more details)
178188
*/
@@ -260,6 +270,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
260270
// 3: LITE_POWER_NO_BIND 4: LITE_POWER_RAND_HIGH
261271
// 5: LITE_POWER_RAND_LOW
262272
LitePowerMode lite_power_mode = LitePowerMode::LITE_POWER_NO_BIND;
273+
// enable int8 or not
274+
bool lite_enable_int8 = false;
263275
// enable fp16 or not
264276
bool lite_enable_fp16 = false;
265277
// optimized model dir for CxxConfig

fastdeploy/vision/detection/ppdet/mask_rcnn.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ MaskRCNN::MaskRCNN(const std::string& model_file,
2424
const RuntimeOption& custom_option,
2525
const ModelFormat& model_format) {
2626
config_file_ = config_file;
27-
valid_cpu_backends = {Backend::PDINFER};
27+
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
2828
valid_gpu_backends = {Backend::PDINFER};
2929
runtime_option = custom_option;
3030
runtime_option.model_format = model_format;

fastdeploy/vision/detection/ppdet/ppyolo.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ PPYOLO::PPYOLO(const std::string& model_file, const std::string& params_file,
2323
const RuntimeOption& custom_option,
2424
const ModelFormat& model_format) {
2525
config_file_ = config_file;
26-
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
26+
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
2727
valid_gpu_backends = {Backend::PDINFER};
2828
has_nms_ = true;
2929
runtime_option = custom_option;

fastdeploy/vision/detection/ppdet/ppyoloe.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
1414
const RuntimeOption& custom_option,
1515
const ModelFormat& model_format) {
1616
config_file_ = config_file;
17-
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
17+
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
1818
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
1919
runtime_option = custom_option;
2020
runtime_option.model_format = model_format;

fastdeploy/vision/detection/ppdet/rcnn.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ FasterRCNN::FasterRCNN(const std::string& model_file,
2424
const RuntimeOption& custom_option,
2525
const ModelFormat& model_format) {
2626
config_file_ = config_file;
27-
valid_cpu_backends = {Backend::PDINFER};
27+
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
2828
valid_gpu_backends = {Backend::PDINFER};
2929
has_nms_ = true;
3030
runtime_option = custom_option;

fastdeploy/vision/detection/ppdet/yolov3.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file,
2323
const RuntimeOption& custom_option,
2424
const ModelFormat& model_format) {
2525
config_file_ = config_file;
26-
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
26+
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
2727
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
2828
runtime_option = custom_option;
2929
runtime_option.model_format = model_format;

fastdeploy/vision/detection/ppdet/yolox.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ PaddleYOLOX::PaddleYOLOX(const std::string& model_file,
2424
const RuntimeOption& custom_option,
2525
const ModelFormat& model_format) {
2626
config_file_ = config_file;
27-
valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
27+
valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
2828
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
2929
runtime_option = custom_option;
3030
runtime_option.model_format = model_format;

fastdeploy/vision/faceid/contrib/insightface_rec.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ InsightFaceRecognitionModel::InsightFaceRecognitionModel(
3030
valid_cpu_backends = {Backend::ORT};
3131
valid_gpu_backends = {Backend::ORT, Backend::TRT};
3232
} else {
33-
valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
33+
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
3434
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
3535
}
3636
runtime_option = custom_option;

fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
1616
const RuntimeOption& custom_option,
1717
const ModelFormat& model_format) {
1818
config_file_ = config_file;
19-
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
19+
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
2020
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
2121
runtime_option = custom_option;
2222
runtime_option.model_format = model_format;

fastdeploy/vision/matting/ppmatting/ppmatting.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ PPMatting::PPMatting(const std::string& model_file,
2525
const RuntimeOption& custom_option,
2626
const ModelFormat& model_format) {
2727
config_file_ = config_file;
28-
valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
28+
valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
2929
valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
3030
runtime_option = custom_option;
3131
runtime_option.model_format = model_format;

fastdeploy/vision/ocr/ppocr/classifier.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ Classifier::Classifier(const std::string& model_file,
3030
Backend::OPENVINO};
3131
valid_gpu_backends = {Backend::ORT, Backend::TRT};
3232
} else {
33-
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
33+
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
3434
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
3535
}
3636
runtime_option = custom_option;

fastdeploy/vision/ocr/ppocr/dbdetector.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ DBDetector::DBDetector(const std::string& model_file,
3030
Backend::OPENVINO};
3131
valid_gpu_backends = {Backend::ORT, Backend::TRT};
3232
} else {
33-
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
33+
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
3434
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
3535
}
3636

fastdeploy/vision/ocr/ppocr/ppocr_v2.cc

-1
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,6 @@ bool PPOCRv2::Predict(cv::Mat* img,
110110
if (nullptr != classifier_ && result->cls_labels[i] % 2 == 1 && result->cls_scores[i] > classifier_->cls_thresh) {
111111
cv::rotate(image_list[i], image_list[i], 1);
112112
}
113-
114113
if (nullptr != recognizer_ && !Recognize(&(image_list[i]), result)) {
115114
FDERROR << "Failed to recgnize croped image of index " << i << "." << std::endl;
116115
return false;

fastdeploy/vision/ocr/ppocr/recognizer.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ Recognizer::Recognizer(const std::string& model_file,
4848
Backend::OPENVINO};
4949
valid_gpu_backends = {Backend::ORT, Backend::TRT};
5050
} else {
51-
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
51+
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
5252
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
5353
}
5454

fastdeploy/vision/segmentation/ppseg/model.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
2626
const RuntimeOption& custom_option,
2727
const ModelFormat& model_format) {
2828
config_file_ = config_file;
29-
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT};
29+
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
3030
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
3131
runtime_option = custom_option;
3232
runtime_option.model_format = model_format;
@@ -106,7 +106,7 @@ bool PaddleSegModel::BuildPreprocessPipelineFromConfig() {
106106
<< "Please refer to https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export.md"
107107
<< " to export model with fixed input shape."
108108
<< std::endl;
109-
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
109+
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
110110
valid_gpu_backends = {Backend::PDINFER};
111111
}
112112
if (input_height != -1 && input_width != -1 && !yml_contain_resize_op) {

0 commit comments

Comments
 (0)