|
|
|
@@ -63,6 +63,9 @@ class DETECTION(Structure):
                ("uc", POINTER(c_float)),
                ("points", c_int)]

class DETNUMPAIR(Structure):
    _fields_ = [("num", c_int),
                ("dets", POINTER(DETECTION))]
|
|
|
|
|
|
|
|
|
class IMAGE(Structure):
    _fields_ = [("w", c_int),

@@ -161,6 +164,9 @@ make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_batch_detections = lib.free_batch_detections
free_batch_detections.argtypes = [POINTER(DETNUMPAIR), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

@@ -210,6 +216,11 @@ predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)

network_predict_batch = lib.network_predict_batch
network_predict_batch.argtypes = [c_void_p, IMAGE, c_int, c_int, c_int,
                                  c_float, c_float, POINTER(c_int), c_int, c_int]
network_predict_batch.restype = POINTER(DETNUMPAIR)
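# Added note (not in the original source): judging from the call in
# performBatchDetect() below, the ten arguments appear to correspond to
# (net, image, batch_size, width, height, thresh, hier_thresh, class map,
# relative-coords flag, letterbox flag), and the returned POINTER(DETNUMPAIR)
# must be released with free_batch_detections() once the results have been read.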
|
|
|
|
|
|
|
|
|
def array_to_image(arr):
    import numpy as np
    # the original array must also be returned so Python does not free the
    # memory that the IMAGE struct points to

@@ -445,5 +456,72 @@ def performDetect(imagePath="data/dog.jpg", thresh= 0.25, configPath = "./cfg/yo
            print("Unable to show image: "+str(e))
    return detections


def performBatchDetect(thresh= 0.25, configPath = "./cfg/yolov3.cfg", weightPath = "yolov3.weights", metaPath= "./cfg/coco.data", hier_thresh=.5, nms=.45, batch_size=3):
    import cv2
    import numpy as np
    # NB: every image in the batch must have the same width and height.
    # You can swap in other images as long as that constraint holds.
    img_samples = ['data/person.jpg', 'data/person.jpg', 'data/person.jpg']
    image_list = [cv2.imread(k) for k in img_samples]

    net = load_net_custom(configPath.encode('utf-8'), weightPath.encode('utf-8'), 0, batch_size)
    meta = load_meta(metaPath.encode('utf-8'))
    pred_height, pred_width, c = image_list[0].shape
    net_width, net_height = (network_width(net), network_height(net))
    # Resize each image to the network input size and convert BGR/HWC to RGB/CHW.
    img_list = []
    for custom_image_bgr in image_list:
        custom_image = cv2.cvtColor(custom_image_bgr, cv2.COLOR_BGR2RGB)
        custom_image = cv2.resize(
            custom_image, (net_width, net_height), interpolation=cv2.INTER_NEAREST)
        custom_image = custom_image.transpose(2, 0, 1)
        img_list.append(custom_image)

    # Pack the whole batch into one contiguous float buffer normalised to [0, 1]
    # and hand it to darknet as a single IMAGE struct.
    arr = np.concatenate(img_list, axis=0)
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(net_width, net_height, c, data)

    batch_dets = network_predict_batch(net, im, batch_size, pred_width,
                                       pred_height, thresh, hier_thresh, None, 0, 0)
    batch_boxes = []
    batch_scores = []
    batch_classes = []
    for b in range(batch_size):
        num = batch_dets[b].num
        dets = batch_dets[b].dets
        if nms:
            do_nms_obj(dets, num, meta.classes, nms)
        boxes = []
        scores = []
        classes = []
        for i in range(num):
            det = dets[i]
            score = -1
            label = None
            # Keep the class with the highest probability for this detection.
            for c in range(det.classes):
                p = det.prob[c]
                if p > score:
                    score = p
                    label = c
            if score > thresh:
                # Convert the centre/width/height box to corner coordinates.
                box = det.bbox
                left, top, right, bottom = map(int, (box.x - box.w / 2, box.y - box.h / 2,
                                                     box.x + box.w / 2, box.y + box.h / 2))
                boxes.append((top, left, bottom, right))
                scores.append(score)
                classes.append(label)
                boxColor = (int(255 * (1 - (score ** 2))), int(255 * (score ** 2)), 0)
                cv2.rectangle(image_list[b], (left, top),
                              (right, bottom), boxColor, 2)
        cv2.imwrite(os.path.basename(img_samples[b]), image_list[b])

        batch_boxes.append(boxes)
        batch_scores.append(scores)
        batch_classes.append(classes)
    free_batch_detections(batch_dets, batch_size)
    return batch_boxes, batch_scores, batch_classes
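
# Added usage sketch (not part of the original script): a minimal example of how
# the three parallel per-image lists returned by performBatchDetect() line up.
# The helper name printBatchResults is made up for illustration and assumes the
# default cfg/weights/meta paths used above are available. Boxes come back as
# (top, left, bottom, right) pixel tuples, scores as the best class probability,
# and classes as integer class indices.
def printBatchResults():
    batch_boxes, batch_scores, batch_classes = performBatchDetect()
    for image_idx, (boxes, scores, classes) in enumerate(
            zip(batch_boxes, batch_scores, batch_classes)):
        print("image %d: %d detections above threshold" % (image_idx, len(boxes)))
        for (top, left, bottom, right), score, label in zip(boxes, scores, classes):
            print("    class %d  score %.2f  box top=%d left=%d bottom=%d right=%d"
                  % (label, score, top, left, bottom, right))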
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    print(performDetect())
    # Uncomment the following line to see batch inference working
    #print(performBatchDetect())