@@ -42,7 +42,11 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
 void LiteBackend::BuildOption(const LiteBackendOption& option) {
   option_ = option;
   std::vector<paddle::lite_api::Place> valid_places;
-  if (option.enable_fp16) {
+  if (option_.enable_int8) {
+    valid_places.push_back(
+        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  if (option_.enable_fp16) {
     paddle::lite_api::MobileConfig check_fp16_config;
     // Determine whether the device supports the FP16
     // instruction set (or whether it is an arm device
@@ -58,12 +62,12 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
   valid_places.push_back(
       paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
   config_.set_valid_places(valid_places);
-  if (option.threads > 0) {
-    config_.set_threads(option.threads);
+  if (option_.threads > 0) {
+    config_.set_threads(option_.threads);
   }
-  if (option.power_mode > 0) {
+  if (option_.power_mode > 0) {
     config_.set_power_mode(
-        static_cast<paddle::lite_api::PowerMode>(option.power_mode));
+        static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
   }
 }
 
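
Note on the BuildOption changes above: Paddle Lite selects kernels by scanning valid_places in order, so pushing the ARM kInt8 Place before the FP16/FP32 ones is what gives int8 kernels priority. For context, here is a minimal hypothetical sketch of the option values this function consumes; the field names come from the diff itself, while the concrete power-mode values are assumed to follow Paddle Lite's public PowerMode enum (LITE_POWER_HIGH = 0, LITE_POWER_LOW = 1, ...) and are illustrative only:

    // Hypothetical usage sketch; not part of this commit.
    LiteBackendOption option;
    option.enable_int8 = true;  // registers the ARM kInt8 Place first
    option.enable_fp16 = true;  // kept only if the runtime FP16 check passes
    option.threads = 4;         // forwarded to config_.set_threads()
    option.power_mode = 1;      // e.g. LITE_POWER_LOW; 0 (LITE_POWER_HIGH) stays the default
    // option is then passed through to LiteBackend::BuildOption(option).
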
@@ -136,14 +140,13 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
 std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }
 
 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
-                         std::vector<FDTensor>* outputs) {
+                        std::vector<FDTensor>* outputs) {
   if (inputs.size() != inputs_desc_.size()) {
     FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
             << ") should keep same with the inputs of this model("
             << inputs_desc_.size() << ")." << std::endl;
     return false;
   }
-
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto iter = inputs_order_.find(inputs[i].name);
     if (iter == inputs_order_.end()) {
@@ -152,12 +155,29 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
       return false;
     }
     auto tensor = predictor_->GetInput(iter->second);
-    tensor->Resize(inputs[i].shape);
-    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
-                                inputs[i].Nbytes(),
-                                paddle::lite_api::TargetType::kARM);
+    // Adjust dims only; allocate lazily.
+    tensor->Resize(inputs[i].shape);
+    if (inputs[i].dtype == FDDataType::FP32) {
+      tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const float*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT32) {
+      tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT8) {
+      tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::UINT8) {
+      tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const uint8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else {
+      FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
+    }
   }
-
+
   predictor_->Run();
 
   outputs->resize(outputs_desc_.size());
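
Two notes on the Infer changes above. First, the hunk changes semantics, not just style: ShareExternalMemory wrapped the caller's buffer zero-copy, while CopyFromCpu copies the data into the tensor's own (lazily allocated) buffer, which is why Resize now only adjusts dims. Second, the four branches differ only in element type; below is a hedged sketch of how they could be folded with a small template helper. The helper name is illustrative and not part of the commit, and it assumes predictor_->GetInput returns std::unique_ptr<paddle::lite_api::Tensor> as in the Paddle Lite C++ API:

    // Illustrative refactor only; mirrors the branches added above.
    template <typename T>
    static void CopyInputToLite(paddle::lite_api::Tensor* tensor, const FDTensor& fd) {
      tensor->CopyFromCpu<T, paddle::lite_api::TargetType::kARM>(
          reinterpret_cast<const T*>(fd.CpuData()));
    }

    // Call site inside the input loop, dispatching once on the FastDeploy dtype:
    switch (inputs[i].dtype) {
      case FDDataType::FP32:  CopyInputToLite<float>(tensor.get(), inputs[i]); break;
      case FDDataType::INT32: CopyInputToLite<int>(tensor.get(), inputs[i]); break;
      case FDDataType::INT8:  CopyInputToLite<int8_t>(tensor.get(), inputs[i]); break;
      case FDDataType::UINT8: CopyInputToLite<uint8_t>(tensor.get(), inputs[i]); break;
      default: FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
    }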