-
Notifications
You must be signed in to change notification settings - Fork 0
/
tensorflowLiteMobilenet2.py
104 lines (98 loc) · 3.33 KB
/
tensorflowLiteMobilenet2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
"""Run a MobileNet-SSD TFLite detector on one image and draw detections.

Expects ``frame_00000.png`` in the working directory and the model at
``models/mobilenet.tflite``.  Output-tensor order is assumed to follow the
standard TFLite SSD post-processing layout — 0 = boxes, 1 = class ids,
2 = scores — which matches the indices the original script used; confirm
against the actual model if detections look wrong.
"""
import numpy as np
import tensorflow as tf
import cv2 as cv

INPUT_SIZE = 300        # MobileNet-SSD fixed input resolution (300x300)
SCORE_THRESHOLD = 0.6   # minimum confidence for a detection to be drawn

img = cv.imread('frame_00000.png')
if img is None:
    # cv.imread silently returns None on a missing/unreadable file.
    raise FileNotFoundError("frame_00000.png not found or unreadable")
cv.imshow('img', img)

# Load the TFLite model and allocate its tensors.
interpreter = tf.lite.Interpreter(model_path="models/mobilenet.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print("== Input details ==")
print("shape:", input_details[0]['shape'])
print("type:", input_details[0]['dtype'])

# Quantized model: feed uint8 pixels, resized to the network's input size,
# with a leading batch dimension -> (1, 300, 300, 3).
img = cv.resize(img.astype('uint8'), (INPUT_SIZE, INPUT_SIZE))
input_image = np.expand_dims(img, 0)
print(input_image.shape)

interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()

# Boxes are [ymin, xmin, ymax, xmax], normalized to [0, 1].
boxes = np.squeeze(interpreter.get_tensor(output_details[0]['index']))
labels = np.squeeze(interpreter.get_tensor(output_details[1]['index']))
scores = np.squeeze(interpreter.get_tensor(output_details[2]['index']))
print(boxes)
print(labels)

# Draw every detection above the confidence threshold.  (The original drew
# only detection 0, with no score check at all.)
for box, score in zip(boxes, scores):
    if score > SCORE_THRESHOLD:
        ymin, xmin, ymax, xmax = (int(v * INPUT_SIZE) for v in box)
        cv.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), thickness=4)

print(img.shape)
cv.imshow('img', img)
cv.waitKey(0)
# NOTE(review): the block below is a dead, commented-out webcam capture loop;
# it references an undefined ``labels`` mapping, so it cannot be re-enabled
# as-is. Kept for reference — remove or restore deliberately.
# while True:
# val, img = cam.read()
# img = np.asarray(img, dtype=np.float32)
# #img = img.astype('uint8')
# img = cv.resize(img, (300,300))
# input_tensor = tf.convert_to_tensor(img)
# interpreter.set_tensor(input_details[0]['index'], input_tensor[tf.newaxis, ...])
# interpreter.invoke()
#
# output_data = interpreter.get_tensor(output_details[0]['index'])
# scores = interpreter.get_tensor(output_details[2]['index'])
# label = interpreter.get_tensor(output_details[1]['index'])
#
# output_data = np.squeeze(output_data)
# scores = np.squeeze(scores)
# label = np.squeeze(label)
# print(label)
#
#
# i = 0
# for box in output_data:
# if scores[i] > 0.6:
# ymin = int(box[0]*300)
# xmin = int(box[1]*300)
# ymax = int(box[2]*300)
# xmax = int(box[3]*300)
# cv.rectangle(img, (xmin,ymin),(xmax,ymax), (0,0,255))
# try:
# #if label[i] == 0:
# # print("person")
# #elif label[i] == 64:
# # print("mouse")
# obj_label = labels[int(label[i])]
# print(str(obj_label) + ": " + str(scores[i]))
# cv.putText(img, str(obj_label), (xmin,ymin), cv.FONT_HERSHEY_SIMPLEX, 1, 255)
# except Exception as e:
# print("unable to find match")
# i += 1
# #cv.imshow("camera1", img)
# if cv.waitKey(1) == 27:
# break
# cam.release()
# cv.destroyAllWindows()
# #interpreter.invoke()
#
# # The function `get_tensor()` returns a copy of the tensor data.
# # Use `tensor()` in order to get a pointer to the tensor.
# #output_data = interpreter.get_tensor(output_details[0]['index'])
# #print(output_data)