#
# Copyright 2020 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cv2
import config
from flask_sslify import SSLify
from flask import Flask, request, jsonify, Response, send_file
from flask_cors import CORS
# secure_filename lives in werkzeug.utils (the top-level alias was removed
# in Werkzeug 1.0).
from werkzeug.utils import secure_filename

app = Flask(__name__)
CORS(app)
sslify = SSLify(app)
app.config['JSON_AS_ASCII'] = False
app.config['UPLOAD_PATH'] = '/usr/app/images/input/'
app.config['supports_credentials'] = True
app.config['CORS_SUPPORTS_CREDENTIALS'] = True
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
MODEL_PATH = '/usr/app/model/'
IMAGE_PATH = '/usr/app/images/result/'
count = 0
listOfMsgs = []


class model_info():
    """Holds the model files and the confidence threshold used for detection."""

    def __init__(self, model_name):
        # Defaults to the bundled MobileNet-SSD Caffe model; the model_name
        # argument is currently unused.
        self.model_name = 'MobileNetSSD_deploy.caffemodel'
        self.prototxt = 'MobileNetSSD_deploy.prototxt'
        self.confidenceLevel = 0.2

    def get_prototxt(self):
        return self.prototxt

    def get_model_name(self):
        return self.model_name

    def set_confidence_level(self, confidenceLevel):
        self.confidenceLevel = confidenceLevel

    def get_confidence_level(self):
        return self.confidenceLevel

    def update_model(self, model_loc, prototxt, model_name):
        self.prototxt = prototxt
        self.model_name = model_name


# Labels of the network (the 20 PASCAL VOC classes plus background).
classNames = {0: 'background',
              1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',
              5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat', 9: 'chair',
              10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse',
              14: 'motorbike', 15: 'person', 16: 'pottedplant',
              17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor'}


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() \
       in ALLOWED_EXTENSIONS
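# Illustrative behaviour (not part of the service logic):
#   allowed_file('shelf.JPG')  -> True   (extension check is case-insensitive)
#   allowed_file('notes.txt')  -> False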


# Object detection on an input frame
def Detection(img):
    print('inside detection func')
    modelInfo = model_info("caffe")
    ConfPercent = modelInfo.get_confidence_level()
    model_name = modelInfo.get_model_name()
    prototxt_name = modelInfo.get_prototxt()

    model = MODEL_PATH + model_name
    prototxt = MODEL_PATH + prototxt_name
    image = app.config['UPLOAD_PATH'] + img
    label = 'bottles'
    print(ConfPercent)
    print(model)
    print(prototxt)
    print("image path is " + image)

    # Load the Caffe model
    net = cv2.dnn.readNetFromCaffe(prototxt, model)
    # Load the input image from disk
    frame = cv2.imread(image)

    frame_resized = cv2.resize(frame, (300, 300))  # resize frame for
    # prediction
    heightFactor = frame.shape[0]/300.0
    widthFactor = frame.shape[1]/300.0

    # MobileNet requires a fixed input size, so the frame is resized to
    # 300x300 pixels. A scale factor is applied because objects in the
    # network's training data come in different sizes, and a mean
    # subtraction of (127.5, 127.5, 127.5) normalizes the input.
    # After this call the "blob" has the shape (1, 3, 300, 300).
    blob = cv2.dnn.blobFromImage(frame_resized, 0.007843, (300, 300),
                                 (127.5, 127.5, 127.5), False)
    # Set the input blob on the network
    net.setInput(blob)
    # Run the forward pass (prediction)
    detections = net.forward()
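    # The SSD output has shape (1, 1, N, 7): for each of the N candidate
    # detections the 7 values are [image_id, class_id, confidence,
    # x_min, y_min, x_max, y_max], with box coordinates normalized to
    # [0, 1]. The loops below index into this array accordingly.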

    frame_copy = frame.copy()
    # Size of the resized frame (300x300)
    cols = frame_resized.shape[1]
    rows = frame_resized.shape[0]

    # To get the class and location of each detected object, there is a
    # fixed index for the class, location and confidence value in the
    # detections array.
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]  # Confidence of prediction
        if confidence > ConfPercent:  # Filter prediction
            class_id = int(detections[0, 0, i, 1])  # Class label

            # Object location
            xLeftBottom = int(detections[0, 0, i, 3] * cols)
            yLeftBottom = int(detections[0, 0, i, 4] * rows)
            xRightTop = int(detections[0, 0, i, 5] * cols)
            yRightTop = int(detections[0, 0, i, 6] * rows)

            xLeftBottom_ = int(widthFactor * xLeftBottom)
            yLeftBottom_ = int(heightFactor * yLeftBottom)
            xRightTop_ = int(widthFactor * xRightTop)
            yRightTop_ = int(heightFactor * yRightTop)
            # Draw location of object
            cv2.rectangle(frame_resized, (xLeftBottom, yLeftBottom),
                          (xRightTop, yRightTop),
                          (0, 255, 0))

            cv2.rectangle(frame_copy, (xLeftBottom_, yLeftBottom_),
                          (xRightTop_, yRightTop_),
                          (0, 255, 0), -1)
    opacity = 0.3
    cv2.addWeighted(frame_copy, opacity, frame, 1 - opacity, 0, frame)

    count = 0
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]  # Confidence of prediction
        if confidence > ConfPercent:  # Filter prediction
            class_id = int(detections[0, 0, i, 1])  # Class label

            # Object location
            xLeftBottom = int(detections[0, 0, i, 3] * cols)
            yLeftBottom = int(detections[0, 0, i, 4] * rows)
            xRightTop = int(detections[0, 0, i, 5] * cols)
            yRightTop = int(detections[0, 0, i, 6] * rows)

            xLeftBottom_ = int(widthFactor * xLeftBottom)
            yLeftBottom_ = int(heightFactor * yLeftBottom)
            xRightTop_ = int(widthFactor * xRightTop)
            yRightTop_ = int(heightFactor * yRightTop)
            cv2.rectangle(frame, (xLeftBottom_, yLeftBottom_),
                          (xRightTop_, yRightTop_),
                          (0, 0, 0), 2)

            # Draw label and confidence of prediction on the full-size frame
            if class_id in classNames:
                label = classNames[class_id] + ": " + str(confidence)
                labelSize, baseLine = cv2.getTextSize(label,
                                                      cv2.FONT_HERSHEY_TRIPLEX,
                                                      0.8, 1)

                yLeftBottom_ = max(yLeftBottom_, labelSize[1])
                cv2.rectangle(
                    frame,
                    (xLeftBottom_, yLeftBottom_ - labelSize[1]),
                    (xLeftBottom_ + labelSize[0], yLeftBottom_ + baseLine),
                    (255, 255, 255), cv2.FILLED)
                cv2.putText(frame, label, (xLeftBottom_, yLeftBottom_),
                            cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 0, 0))
                print(label)
                count = count + 1

    print("total item count", count)
    # cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
    print("before im write")
    cv2.imwrite(IMAGE_PATH + "result.jpeg", frame)
    # cv2.imshow("frame", frame)
    # cv2.waitKey(0)
    print("before destroy window")
    # cv2.destroyAllWindows()
    # Detect_result = {'ImposedImage': 'frame', 'ObjCount': count,
    # 'ObjType': type, 'Time': time}
    Detect_result = {'ObjCount': count}
    print(Detect_result)
    return Detect_result
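# Illustrative local test (a sketch, not part of the service): assuming an
# image named 'sample.jpg' has already been copied into
# app.config['UPLOAD_PATH'] and the MobileNet-SSD files are present under
# MODEL_PATH, Detection() can be exercised directly:
#
#     result = Detection('sample.jpg')
#     print(result)   # e.g. {'ObjCount': 2}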


@app.route('/mep/v1/obj_detection/uploadModel', methods=['POST'])
def uploadModel():
    """
    Upload/refresh the detection model.
    :return: success response
    """
    app.logger.info("Received message from ClientIP [" + request.remote_addr
                    + "] Operation [" + request.method + "]" +
                    " Resource [" + request.url + "]")

    # Note: this updates a freshly created model_info instance only; the
    # Detection() handler builds its own instance with the default model.
    modelInfo = model_info("caffe")
    modelInfo.update_model("caffe", "mobilenet_ssd", "prototext")
    return Response("success")
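# Example request (sketch; host/port depend on the config module settings):
#   curl -X POST http://<server_address>:<server_port>/mep/v1/obj_detection/uploadModel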


@app.route('/mep/v1/obj_detection/confidencelevel', methods=['POST'])
def setConfidenceLevel():
    """
    Set the confidence threshold used to filter detections.
    :return: success response
    """
    app.logger.info("Received message from ClientIP [" + request.remote_addr
                    + "] Operation [" + request.method + "]" +
                    " Resource [" + request.url + "]")

    # Note: the level is currently hard-coded and applied to a freshly
    # created model_info instance only.
    confidenceLevel = 0.2
    modelInfo = model_info("caffe")
    modelInfo.set_confidence_level(confidenceLevel)
    return Response("success")
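# Example request (sketch; host/port depend on the config module settings):
#   curl -X POST http://<server_address>:<server_port>/mep/v1/obj_detection/confidencelevel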


@app.route('/mep/v1/obj_detection/detect', methods=['POST'])
def Obj_Detection():
    """
    Trigger object detection on the uploaded frame/image.
    Input: frame/image file in the multipart 'file' field
    :return: detection result (object count)
    """
    app.logger.info("Received message from ClientIP [" + request.remote_addr
                    + "] Operation [" + request.method + "]" +
                    " Resource [" + request.url + "]")

    if 'file' not in request.files:
        raise IOError('No file')

    file = request.files['file']
    if file.filename == '':
        app.logger.info('No file selected for uploading')
        raise IOError('No file')
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        print('file name', filename)
        file.save(os.path.join(app.config['UPLOAD_PATH'], filename))
        app.logger.info('File successfully uploaded')
        print('file path', app.config['UPLOAD_PATH'] + filename)
        Detect_result = Detection(filename)
    else:
        app.logger.info('Allowed file types are png, jpg and jpeg')
        return Response("failure")
    return jsonify(Detect_result)
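# Example request (sketch; host/port depend on the config module settings):
#   curl -F "file=@/tmp/shelf.jpg" \
#        http://<server_address>:<server_port>/mep/v1/obj_detection/detect
# Expected response body: {"ObjCount": <number of detected objects>}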


@app.route('/mep/v1/obj_detection/image', methods=['GET'])
def image_download():
    """
    Download the annotated result image produced by the last detection.
    :return: result.jpeg as a file attachment
    """
    app.logger.info("Received message from ClientIP [" + request.remote_addr
                    + "] Operation [" + request.method + "]" +
                    " Resource [" + request.url + "]")

    return send_file(IMAGE_PATH + "result.jpeg",
                     attachment_filename='result.jpeg')
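# Example request (sketch; host/port depend on the config module settings):
#   curl -o result.jpeg \
#        http://<server_address>:<server_port>/mep/v1/obj_detection/image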


def start_server(handler):
    app.logger.addHandler(handler)
    if config.ssl_enabled:
        context = (config.ssl_certfilepath, config.ssl_keyfilepath)
        app.run(host=config.server_address, debug=True, ssl_context=context,
                threaded=True, port=config.server_port)
    else:
        app.run(host=config.server_address, debug=True, threaded=True,
                port=config.server_port)
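

# Minimal standalone-run sketch (an assumption; the actual entry point that
# calls start_server() may live elsewhere in this repository). It only
# attaches a stream handler so the request logging above becomes visible.
if __name__ == '__main__':
    import logging

    log_handler = logging.StreamHandler()
    log_handler.setLevel(logging.INFO)
    start_server(log_handler)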