
Commit a45b893

Fix spelling, spacing, and unused variables (#236)

* Fix spelling, spacing, and unused variables
* Fixed typo in 3 more models
* Added open back
* Remove setBackendAndTarget functions from C++ demos, which are unused functionality.
1 parent: 80ef421

File tree

11 files changed: +13 -58 lines

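The common change across the C++ demos below is that each unused setBackendAndTarget member function is deleted; the demos already configure the DNN backend and target once, at construction time, via cv::dnn::Net::setPreferableBackend and setPreferableTarget (or, for YuNet, by passing the IDs to cv::FaceDetectorYN::create). A minimal sketch of that constructor-time pattern follows; the Detector class name and the model path are illustrative placeholders, not code from the repository.

#include <string>

#include <opencv2/dnn.hpp>

// Sketch only: "Detector" and the model path argument are hypothetical placeholders.
class Detector
{
public:
    Detector(const std::string& modelPath, cv::dnn::Backend backendId, cv::dnn::Target targetId)
    {
        // Load the model and bind the execution backend/target once here,
        // instead of exposing a separate (and unused) setBackendAndTarget().
        net = cv::dnn::readNet(modelPath);
        net.setPreferableBackend(backendId); // e.g. cv::dnn::DNN_BACKEND_OPENCV
        net.setPreferableTarget(targetId);   // e.g. cv::dnn::DNN_TARGET_CPU
    }

private:
    cv::dnn::Net net;
};

Usage would then be a single construction such as Detector det("model.onnx", cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU); with no further backend/target calls needed.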

models/face_detection_yunet/demo.cpp (-7)

@@ -31,13 +31,6 @@ class YuNet
         model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
     }

-    void setBackendAndTarget(int backend_id, int target_id)
-    {
-        backend_id_ = backend_id;
-        target_id_ = target_id;
-        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
-    }
-
     /* Overwrite the input size when creating the model. Size format: [Width, Height].
     */
     void setInputSize(const cv::Size& input_size)

models/image_classification_mobilenet/demo.cpp (+1 -1)

@@ -69,7 +69,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat frame, blob;
     static const std::string kWinName = model;
     int nbInference = 0;

models/object_detection_nanodet/demo.cpp (+2 -7)

@@ -46,7 +46,8 @@ class NanoDet
     {
         this->strides = { 8, 16, 32, 64 };
         this->net = readNet(modelPath);
-        setBackendAndTarget(bId, tId);
+        this->net.setPreferableBackend(bId);
+        this->net.setPreferableTarget(tId);
         this->project = Mat::zeros(1, this->regMax + 1, CV_32F);
         for (size_t i = 0; i <= this->regMax; ++i)
         {
@@ -57,12 +58,6 @@ class NanoDet
         this->generateAnchors();
     }

-    void setBackendAndTarget(Backend bId, Target tId)
-    {
-        this->net.setPreferableBackend(bId);
-        this->net.setPreferableTarget(tId);
-    }
-
     Mat preProcess(const Mat& inputImage)
     {
         Image2BlobParams paramNanodet;

models/object_detection_nanodet/demo.py (+1 -1)

@@ -148,7 +148,7 @@ def vis(preds, res_img, letterbox_scale, fps=None):
     img = vis(preds, image, letterbox_scale)

     if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
         cv.imwrite('result.jpg', img)

     if args.vis:

models/object_detection_yolox/demo.cpp (+2 -10)

@@ -61,14 +61,6 @@ class YoloX {
         this->generateAnchors();
     }

-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     Mat preprocess(Mat img)
     {
         Mat blob;
@@ -137,7 +129,7 @@ class YoloX {
             boxesXYXY[r].height = boxes_xyxy.at<float>(r, 3);
         }

-        vector< int > keep;
+        vector<int> keep;
         NMSBoxesBatched(boxesXYXY, maxScores, maxScoreIdx, this->confThreshold, this->nmsThreshold, keep);
         Mat candidates(int(keep.size()), 6, CV_32FC1);
         int row = 0;
@@ -282,7 +274,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat frame, inputBlob;
     double letterboxScale;

models/object_detection_yolox/demo.py (+1 -1)

@@ -120,7 +120,7 @@ def vis(dets, srcimg, letterbox_scale, fps=None):
     img = vis(preds, image, letterbox_scale)

     if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
         cv.imwrite('result.jpg', img)

     if args.vis:

models/person_detection_mediapipe/demo.cpp (+1 -10)

@@ -43,14 +43,6 @@ class MPPersonDet {
         this->anchors = getMediapipeAnchor();
     }

-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     pair<Mat, Size> preprocess(Mat img)
     {
         Mat blob;
@@ -237,10 +229,9 @@ int main(int argc, char** argv)
         backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
     //! [Open a video file or an image file or a camera stream]
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");

     static const std::string kWinName = "MPPersonDet Demo";
-    int nbInference = 0;
     while (waitKey(1) < 0)
     {
         cap >> frame;

models/pose_estimation_mediapipe/demo.cpp (+2 -18)

@@ -45,14 +45,6 @@ class MPPersonDet {
         this->anchors = getMediapipeAnchor();
     }

-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     pair<Mat, Size> preprocess(Mat img)
     {
         Mat blob;
@@ -124,7 +116,7 @@ class MPPersonDet {
         {
             rotBoxes[i] = Rect2d(Point2d(boxes.at<float>(i, 0), boxes.at<float>(i, 1)), Point2d(boxes.at<float>(i, 2), boxes.at<float>(i, 3)));
         }
-        vector< int > keep;
+        vector<int> keep;
         NMSBoxes(rotBoxes, score, this->scoreThreshold, this->nmsThreshold, keep, 1.0f, this->topK);
         if (keep.size() == 0)
             return Mat();
@@ -179,14 +171,6 @@ class MPPose {
         this->personBoxEnlargeFactor = 1.25;
     }

-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     tuple<Mat, Mat, float, Mat, Size> preprocess(Mat image, Mat person)
     {
         /***
@@ -567,7 +551,7 @@ int main(int argc, char** argv)
     MPPose poseEstimator(model, confThreshold, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
     //! [Open a video file or an image file or a camera stream]
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");

     static const std::string kWinName = "MPPose Demo";
     while (waitKey(1) < 0)

models/text_detection_ppocr/demo.cpp (+1 -1)

@@ -124,7 +124,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat originalImage;
     static const std::string kWinName = modelName;
     while (waitKey(1) < 0)

models/text_recognition_crnn/demo.cpp (+1 -1)

@@ -224,7 +224,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat originalImage;
     static const std::string kWinName = modelPath;
     while (waitKey(1) < 0)

models/text_recognition_crnn/demo.py (+1 -1)

@@ -106,7 +106,7 @@ def visualize(image, boxes, texts, color=(0, 255, 0), isClosed=True, thickness=2

     # Save results if save is true
     if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
         cv.imwrite('result.jpg', original_image)

     # Visualize results in a new window
