import cv2
import time
from gpiozero import AngularServo
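# Servo on GPIO 18; the pulse widths below assume a typical hobby servo with an
# extended (~0.6 ms to 2.3 ms) pulse range. Adjust min/max for your hardware.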
servo = AngularServo(18, initial_angle=0, min_pulse_width=0.0006, max_pulse_width=0.0023)

# thres = 0.45  # Threshold to detect object

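# Load the COCO class labels (one per line); the detector returns 1-based class IDs
# that index into this list.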
classNames = []
classFile = "/home/debug.tsang/Documents/Object_Detection_Files/coco.names"
with open(classFile, "rt") as f:
    classNames = f.read().rstrip("\n").split("\n")

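# Pre-trained SSD MobileNet V3 Large (COCO, 2020-01-14): graph config (.pbtxt) plus
# the frozen TensorFlow inference graph, loaded via OpenCV's DNN module.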
configPath = "/home/debug.tsang/Documents/Object_Detection_Files/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
weightsPath = "/home/debug.tsang/Documents/Object_Detection_Files/frozen_inference_graph.pb"

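# Model input preprocessing: 320x320 frames, pixels mapped to [-1, 1] via
# scale 1/127.5 and mean 127.5, with BGR swapped to RGB.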
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

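# getObjects: run the detector on a frame, keep detections whose class name is in
# `objects` (all COCO classes when the list is empty), optionally draw boxes and
# labels, and return the annotated frame plus a list of [box, className] entries.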
def getObjects(img, thres, nms, draw=True, objects=[]):
    classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nms)
    # print(classIds, bbox)
    if len(objects) == 0: objects = classNames
    objectInfo = []
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            className = classNames[classId - 1]
            if className in objects:
                objectInfo.append([box, className])
                if draw:
                    cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
                    cv2.putText(img, className.upper(), (box[0] + 10, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

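                    # Sweep the servo from -90 to +90 degrees for each drawn detection;
                    # the 2-second pause blocks the video loop while the servo moves.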
                    servo.angle = -90
                    time.sleep(2)
                    servo.angle = 90

    return img, objectInfo

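# Standalone demo: open the default webcam at 640x480, look for cups and horses with
# a 0.45 confidence threshold and 0.2 NMS threshold, and display the annotated feed.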
if __name__ == "__main__":

    cap = cv2.VideoCapture(0)
    cap.set(3, 640)   # CAP_PROP_FRAME_WIDTH
    cap.set(4, 480)   # CAP_PROP_FRAME_HEIGHT
    # cap.set(10, 70) # CAP_PROP_BRIGHTNESS

    while True:
        success, img = cap.read()
        if not success:
            break
        result, objectInfo = getObjects(img, 0.45, 0.2, objects=['cup', 'horse'])
        # print(objectInfo)

        cv2.imshow("Output", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()