Commit 070b56c4 authored by Erkan Karabulut

add missing comment lines and references

parent 198dc2e8
@@ -27,6 +27,8 @@ export class TSMatchController extends Controller {
onCommunicationManagerStarting() {
super.onCommunicationManagerStarting();
// subscribe to thing discovery, deadvertisement and observation topics
this.communicationManager
.observeRaw(process.env.TOPIC_DISCOVERY)
.subscribe(discoveredSensor => {
@@ -46,6 +48,10 @@ export class TSMatchController extends Controller {
});
}
/**
* Subscribe to service request and response topics and advertise them
* @param rawObs
*/
registerRaw(rawObs) {
rawObs.subscribe(([topic, payload]) => {
this.communicationManager
@@ -59,6 +65,10 @@ export class TSMatchController extends Controller {
});
}
/**
* Subscribe to the service request deletion topic and advertise it
* @param rawUnsubscribeRaw
*/
unsubscribeRequestRaw(rawUnsubscribeRaw) {
rawUnsubscribeRaw.subscribe(([topic, payload]) => {
this.communicationManager
@@ -74,6 +84,11 @@ export class TSMatchController extends Controller {
return this._results.asObservable();
}
/**
* Subscribe to sensor list request and response topics and re-advertise them
* @param rawAllSensors
* @constructor
*/
GetAllSensorsRaw(rawAllSensors) {
rawAllSensors.subscribe(([topic, payload]) => {
this.communicationManager
......
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to run object detection with a TensorFlow Lite model."""
import platform
from typing import List, NamedTuple
import zipfile
import cv2
import numpy as np
# pylint: disable=g-import-not-at-top
try:
# Import TFLite interpreter from tflite_runtime package if it's available.
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
except ImportError:
# If not, fallback to use the TFLite interpreter from the full TF package.
import tensorflow as tf
Interpreter = tf.lite.Interpreter
load_delegate = tf.lite.experimental.load_delegate
# pylint: enable=g-import-not-at-top
class ObjectDetectorOptions(NamedTuple):
"""A config to initialize an object detector."""
enable_edgetpu: bool = False
"""Enable the model to run on EdgeTPU."""
label_allow_list: List[str] = None
"""The optional allow list of labels."""
label_deny_list: List[str] = None
"""The optional deny list of labels."""
max_results: int = -1
"""The maximum number of top-scored detection results to return."""
num_threads: int = 1
"""The number of CPU threads to be used."""
score_threshold: float = 0.0
"""The score threshold of detection results to return."""
class Rect(NamedTuple):
"""A rectangle in 2D space."""
left: float
top: float
right: float
bottom: float
class Category(NamedTuple):
"""A result of a classification task."""
label: str
score: float
index: int
class Detection(NamedTuple):
"""A detected object as the result of an ObjectDetector."""
bounding_box: Rect
categories: List[Category]
def edgetpu_lib_name():
"""Returns the library name of EdgeTPU in the current platform."""
return {
'Darwin': 'libedgetpu.1.dylib',
'Linux': 'libedgetpu.so.1',
'Windows': 'edgetpu.dll',
}.get(platform.system(), None)
class ObjectDetector:
"""A wrapper class for a TFLite object detection model."""
_mean = 127.5
"""Default mean normalization parameter for float model."""
_std = 127.5
"""Default std normalization parameter for float model."""
_OUTPUT_LOCATION_NAME = 'location'
_OUTPUT_CATEGORY_NAME = 'category'
_OUTPUT_SCORE_NAME = 'score'
_OUTPUT_NUMBER_NAME = 'number of detections'
def __init__(
self,
model_path: str,
options: ObjectDetectorOptions = ObjectDetectorOptions()
) -> None:
"""Initialize a TFLite object detection model.
Args:
model_path: Path to the TFLite model.
options: The config to initialize an object detector. (Optional)
Raises:
ValueError: If the TFLite model is invalid.
OSError: If the current OS isn't supported by EdgeTPU.
"""
# Load label list from metadata.
try:
with zipfile.ZipFile(model_path) as model_with_metadata:
if not model_with_metadata.namelist():
raise ValueError('Invalid TFLite model: no label file found.')
file_name = model_with_metadata.namelist()[0]
with model_with_metadata.open(file_name) as label_file:
label_list = label_file.read().splitlines()
self._label_list = [label.decode('ascii') for label in label_list]
except zipfile.BadZipFile:
print(
'ERROR: Please use models trained with Model Maker or downloaded from TensorFlow Hub.'
)
raise ValueError('Invalid TFLite model: no metadata found.')
# Initialize TFLite model.
if options.enable_edgetpu:
if edgetpu_lib_name() is None:
raise OSError("The current OS isn't supported by Coral EdgeTPU.")
interpreter = Interpreter(
model_path=model_path,
experimental_delegates=[load_delegate(edgetpu_lib_name())],
num_threads=options.num_threads)
else:
interpreter = Interpreter(
model_path=model_path, num_threads=options.num_threads)
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
# From TensorFlow 2.6 on, the order of the outputs becomes undefined.
# Therefore we need to sort the tensor indices of the TFLite outputs to know
# exactly what each output tensor means. For example, if
# output indices are [601, 599, 598, 600], tensor names and indices aligned
# are:
# - location: 598
# - category: 599
# - score: 600
# - detection_count: 601
# following the output port order of the TFLITE_DETECTION_POST_PROCESS op
# (https://github.com/tensorflow/tensorflow/blob/a4fe268ea084e7d323133ed7b986e0ae259a2bc7/tensorflow/lite/kernels/detection_postprocess.cc#L47-L50).
sorted_output_indices = sorted(
[output['index'] for output in interpreter.get_output_details()])
self._output_indices = {
self._OUTPUT_LOCATION_NAME: sorted_output_indices[0],
self._OUTPUT_CATEGORY_NAME: sorted_output_indices[1],
self._OUTPUT_SCORE_NAME: sorted_output_indices[2],
self._OUTPUT_NUMBER_NAME: sorted_output_indices[3],
}
self._input_size = input_detail['shape'][2], input_detail['shape'][1]
self._is_quantized_input = input_detail['dtype'] == np.uint8
self._interpreter = interpreter
self._options = options
def detect(self, input_image: np.ndarray) -> List[Detection]:
"""Run detection on an input image.
Args:
input_image: A [height, width, 3] RGB image. Note that height and width
can be anything since the image will be immediately resized according
to the needs of the model within this function.
Returns:
A list of Detection objects.
"""
image_height, image_width, _ = input_image.shape
input_tensor = self._preprocess(input_image)
self._set_input_tensor(input_tensor)
self._interpreter.invoke()
# Get all output details
boxes = self._get_output_tensor(self._OUTPUT_LOCATION_NAME)
classes = self._get_output_tensor(self._OUTPUT_CATEGORY_NAME)
scores = self._get_output_tensor(self._OUTPUT_SCORE_NAME)
count = int(self._get_output_tensor(self._OUTPUT_NUMBER_NAME))
return self._postprocess(boxes, classes, scores, count, image_width,
image_height)
def _preprocess(self, input_image: np.ndarray) -> np.ndarray:
"""Preprocess the input image as required by the TFLite model."""
# Resize the input
input_tensor = cv2.resize(input_image, self._input_size)
# Normalize the input if it's a float model (aka. not quantized)
if not self._is_quantized_input:
input_tensor = (np.float32(input_tensor) - self._mean) / self._std
# Add batch dimension
input_tensor = np.expand_dims(input_tensor, axis=0)
return input_tensor
def _set_input_tensor(self, image):
"""Sets the input tensor."""
tensor_index = self._interpreter.get_input_details()[0]['index']
input_tensor = self._interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def _get_output_tensor(self, name):
"""Returns the output tensor at the given index."""
output_index = self._output_indices[name]
tensor = np.squeeze(self._interpreter.get_tensor(output_index))
return tensor
def _postprocess(self, boxes: np.ndarray, classes: np.ndarray,
scores: np.ndarray, count: int, image_width: int,
image_height: int) -> List[Detection]:
"""Post-process the output of TFLite model into a list of Detection objects.
Args:
boxes: Bounding boxes of detected objects from the TFLite model.
classes: Class index of the detected objects from the TFLite model.
scores: Confidence scores of the detected objects from the TFLite model.
count: Number of detected objects from the TFLite model.
image_width: Width of the input image.
image_height: Height of the input image.
Returns:
A list of Detection objects detected by the TFLite model.
"""
results = []
# Parse the model output into a list of Detection entities.
for i in range(count):
if scores[i] >= self._options.score_threshold:
y_min, x_min, y_max, x_max = boxes[i]
bounding_box = Rect(
top=int(y_min * image_height),
left=int(x_min * image_width),
bottom=int(y_max * image_height),
right=int(x_max * image_width))
class_id = int(classes[i])
category = Category(
score=scores[i],
label=self._label_list[class_id], # 0 is reserved for background
index=class_id)
result = Detection(bounding_box=bounding_box, categories=[category])
results.append(result)
# Sort detection results by score in descending order
sorted_results = sorted(
results,
key=lambda detection: detection.categories[0].score,
reverse=True)
# Filter out detections in deny list
filtered_results = sorted_results
if self._options.label_deny_list is not None:
filtered_results = list(
filter(
lambda detection: detection.categories[0].label not in self.
_options.label_deny_list, filtered_results))
# Keep only detections in allow list
if self._options.label_allow_list is not None:
filtered_results = list(
filter(
lambda detection: detection.categories[0].label in self._options.
label_allow_list, filtered_results))
# Return at most max_results detections.
if self._options.max_results > 0:
result_count = min(len(filtered_results), self._options.max_results)
filtered_results = filtered_results[:result_count]
return filtered_results
\ No newline at end of file
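
For quick local testing of the wrapper above, a minimal usage sketch is shown below. The model and image file names are placeholders and not part of this commit; the only assumption is that efficientdet_lite0.tflite (the default model used by the node) and a test image are available on disk.

# Minimal usage sketch of the ObjectDetector API above (assumed file names).
import cv2
from object_detector import ObjectDetector, ObjectDetectorOptions

options = ObjectDetectorOptions(num_threads=4, score_threshold=0.3, max_results=5)
detector = ObjectDetector(model_path='efficientdet_lite0.tflite', options=options)

frame = cv2.imread('test.jpg')                      # hypothetical test image (BGR)
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # detect() expects an RGB image

for detection in detector.detect(rgb_frame):
    category = detection.categories[0]
    box = detection.bounding_box
    print('%s (%.2f) at [%d, %d, %d, %d]' %
          (category.label, category.score, box.left, box.top, box.right, box.bottom))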
"""
Copyright (C) 2021 fortiss GmbH
@author Erkan Karabulut – karabulut@fortiss.org
@version 1.0
Detect humans
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import cv2
from object_detector import ObjectDetector
from object_detector import ObjectDetectorOptions
import rospy
import io
import numpy as np
from matplotlib import cm
from sensor_msgs.msg import CompressedImage
from PIL import Image
from tflite_runtime.interpreter import Interpreter
model = None
def detect(image: Image) -> None:
"""
Run object detection on a single image received from the camera node and print the number of detected persons.
"""
# Variables to calculate FPS
counter, fps = 0, 0
start_time = time.time()
# Visualization parameters
row_size = 20 # pixels
left_margin = 24 # pixels
text_color = (0, 0, 255) # red
font_size = 1
font_thickness = 1
fps_avg_frame_count = 10
# Initialize the object detection model
options = ObjectDetectorOptions(
num_threads=1,
score_threshold=0.3,
max_results=3,
enable_edgetpu=False)
detector = ObjectDetector(model_path=model, options=options)
counter += 1
image = np.array(image)
# Run object detection estimation using the model.
detections = detector.detect(image)
person_count = 0
for detection in detections:
for category in detection.categories:
if category.label == "person":
person_count = person_count + 1
print(category.label)
print(person_count)
sys.stdout.flush()
# Calculate the FPS
#if counter % fps_avg_frame_count == 0:
# end_time = time.time()
# fps = fps_avg_frame_count / (end_time - start_time)
# start_time = time.time()
#
## Show the FPS
#fps_text = 'FPS = {:.1f}'.format(fps)
#text_location = (left_margin, row_size)
#cv2.putText(fps_text, text_location, cv2.FONT_HERSHEY_PLAIN,
# font_size, text_color, font_thickness)
def image_received(raw_compressed_image):
image = Image.open(io.BytesIO(bytearray(raw_compressed_image.data))). \
convert('RGB')
# image.save("image" + str(raw_compressed_image.header.stamp) + ".jpeg")
detect(image=image)
if __name__ == '__main__':
rospy.init_node('human_detection', anonymous=True)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--model',
help='Path of the object detection model.',
required=False,
default='efficientdet_lite0.tflite')
parser.add_argument(
'--cameraId', help='Id of camera.', required=False, type=int, default=0)
parser.add_argument(
'--frameWidth',
help='Width of frame to capture from camera.',
required=False,
type=int,
default=640)
parser.add_argument(
'--frameHeight',
help='Height of frame to capture from camera.',
required=False,
type=int,
default=480)
parser.add_argument(
'--numThreads',
help='Number of CPU threads to run the model.',
required=False,
type=int,
default=4)
parser.add_argument(
'--enableEdgeTPU',
help='Whether to run the model on EdgeTPU.',
action='store_true',
required=False,
default=False)
parser.add_argument(
'--framerate',
help='Frame rate',
required=False,
type=int,
default=1)
args = parser.parse_args()
model = args.model
sub = rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, image_received)
rospy.spin()
# Source: https://github.com/tensorflow/examples/blob/master/lite/examples/object_detection/raspberry_pi/object_detector.py
# Dataset (efficientdet_lite0.tflite): https://github.com/tensorflow/examples/blob/master/lite/examples/object_detection/raspberry_pi/setup.sh
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
......
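
To exercise the human_detection node without the physical Raspberry Pi camera, a single test frame could be published on the topic the node subscribes to. The sketch below is an assumption-based helper, not part of this commit: it presumes a running roscore and uses a placeholder image file.

# Sketch: publish one JPEG frame to the topic human_detection subscribes to.
import cv2
import rospy
from sensor_msgs.msg import CompressedImage

rospy.init_node('test_image_publisher', anonymous=True)
pub = rospy.Publisher('/raspicam_node/image/compressed', CompressedImage, queue_size=1)
rospy.sleep(1.0)  # give the subscriber time to connect

frame = cv2.imread('test.jpg')  # placeholder test image
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = 'jpeg'
msg.data = cv2.imencode('.jpg', frame)[1].tobytes()
pub.publish(msg)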
@@ -2,8 +2,10 @@
Copyright (C) 2021 fortiss GmbH
@author Erkan Karabulut – karabulut@fortiss.org
@version 1.0
Detect humans
Read image data from raspicam camera node and detect humans in the data using object_detector.py
Benefited from: https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/raspberry_pi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -69,17 +71,6 @@ def detect(image: Image) -> None:
print(person_count)
sys.stdout.flush()
# Calculate the FPS
# if counter % fps_avg_frame_count == 0:
# end_time = time.time()
# fps = fps_avg_frame_count / (end_time - start_time)
# start_time = time.time()
# Show the FPS
# fps_text = 'FPS = {:.1f}'.format(fps)
# text_location = (left_margin, row_size)
# cv2.putText(fps_text, text_location, cv2.FONT_HERSHEY_PLAIN,
# font_size, text_color, font_thickness)
def image_received(raw_compressed_image):
......
@@ -36,7 +36,7 @@ export class Occupancy {
},
observationType: ObservationTypes.MEASUREMENT,
observedProperty: {
name: "Headcount",
name: "Occupancy",
description: "The number of people",
definition: "A link to the definition",
},
......
@@ -150,7 +150,6 @@ export default class MQTTClientService {
let topic = message.destinationName;
let payload = JSON.parse(message.payloadString);
console.log("message published");
// handle the message according to its topic
switch (topic) {
case Constants.constants.DISCOVERY_TOPIC:
......