Using LRE to Deploy YOLO models¶
You have compiled or exported a YOLO model with LEIP Optimize, and now you want to deploy it in a target environment.
Runtime Setup¶
We will compare two models compiled with LEIP Optimize: an FP32 model and an INT8 model. Each output directory below contains the compiled modelLibrary.so artifact that LRE loads.
case1_output_dir = "optimized_outputs/notebook_trt_fp32"
case2_output_dir = "optimized_outputs/notebook_trt_int8"
Let's first gather dependencies needed for our simple deployment.
import os
import urllib.request
from pylre import LatentRuntimeEngine as LRE
import numpy as np
import cv2
import torch
from ultralytics.utils import yaml_load
from ultralytics.utils.checks import check_yaml
import matplotlib.pyplot as plt
import time
Then we will download an image and a labels file for visualization.
if not os.path.exists("coco8.yaml"):
    print("Downloading coco8.yaml from ultralytics")
    url = "https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco8.yaml"
    urllib.request.urlretrieve(url, "coco8.yaml")
if not os.path.exists("bus.jpg"):
    print("Downloading bus.jpg from ultralytics")
    url = "https://ultralytics.com/images/bus.jpg"
    urllib.request.urlretrieve(url, "bus.jpg")
We load the test image with OpenCV and convert it from OpenCV's default BGR channel order to RGB, which is what both the model and matplotlib expect.
img = cv2.imread("bus.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
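Note that cv2.imread returns None rather than raising an error when a file cannot be read, so an optional guard (not strictly required, but cheap) fails fast with a clear message:
# cv2.imread silently returns None on failure; fail fast here instead of later
assert img is not None, "bus.jpg could not be read; check the download step above"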
We define pre- and post-processing functions for our model.
def preprocess(input_img):
    """
    Preprocesses an RGB input image before performing inference.
    Args:
        input_img (numpy.ndarray): The RGB input image.
    Returns:
        image_data: Preprocessed image data ready for inference.
    """
    # The image was already converted to RGB when it was loaded,
    # so no further color-space conversion is needed here
    # Resize the image to match the model's input shape
    img = cv2.resize(input_img, (640, 640))
    # Normalize the image data by dividing it by 255.0
    image_data = np.array(img) / 255.0
    # Transpose the image to have the channel dimension first (HWC -> CHW)
    image_data = np.transpose(image_data, (2, 0, 1))
    # Add a batch dimension to match the expected NCHW input shape
    image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
    # Return the preprocessed image data
    return image_data
def postprocess(input_img, output):
    """
    Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
    Args:
        input_img (numpy.ndarray): The input image.
        output (torch.Tensor): The raw output of the model.
    Returns:
        numpy.ndarray: The input image with detections drawn on it.
    """
    # Load the class names and build the color palette once per call, so every
    # box of the same class gets the same color and the YAML is not re-read per box
    classes = yaml_load(check_yaml("coco8.yaml"))["names"]
    color_palette = np.random.uniform(0, 255, size=(len(classes), 3))
    def draw_detections(img, box, score, class_id):
        """
        Draws bounding boxes and labels on the input image based on the detected objects.
        Args:
            img: The input image to draw detections on.
            box: Detected bounding box in (x, y, w, h) format.
            score: Corresponding detection score.
            class_id: Class ID for the detected object.
        Returns:
            None
        """
        # Extract the coordinates of the bounding box
        x1, y1, w, h = box
        # Retrieve the color for the class ID
        color = color_palette[class_id]
        # Draw the bounding box on the image
        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
        # Create the label text with class name and score
        label = f"{classes[class_id]}: {score:.2f}"
        # Calculate the dimensions of the label text
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        # Place the label above the box when there is room, otherwise below it
        label_x = x1
        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
        # Draw a filled rectangle as the background for the label text
        cv2.rectangle(
            img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
        )
        # Draw the label text on the image
        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
    # Convert the output tensor to numpy, then squeeze and transpose it so each
    # row holds one candidate detection: (x, y, w, h, class scores...)
    outputs = np.transpose(np.squeeze(output[0].numpy()))
    confidence_thres = 0.5
    iou_thres = 0.5
    # Get the number of rows in the outputs array
    rows = outputs.shape[0]
    # Lists to store the bounding boxes, scores, and class IDs of the detections
    boxes = []
    scores = []
    class_ids = []
    img_height, img_width, _ = input_img.shape
    # Calculate the scaling factors for the bounding box coordinates
    x_factor = img_width / 640
    y_factor = img_height / 640
    # Iterate over each row in the outputs array
    for i in range(rows):
        # Extract the class scores from the current row
        classes_scores = outputs[i][4:]
        # Find the maximum score among the class scores
        max_score = np.max(classes_scores)
        # If the maximum score is above the confidence threshold
        if max_score >= confidence_thres:
            # Get the class ID with the highest score
            class_id = int(np.argmax(classes_scores))
            # Extract the bounding box coordinates from the current row
            x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
            # Calculate the scaled coordinates of the bounding box
            left = int((x - w / 2) * x_factor)
            top = int((y - h / 2) * y_factor)
            width = int(w * x_factor)
            height = int(h * y_factor)
            # Add the class ID, score, and box coordinates to the respective lists
            class_ids.append(class_id)
            scores.append(float(max_score))
            boxes.append([left, top, width, height])
    # Apply non-maximum suppression to filter out overlapping bounding boxes
    indices = cv2.dnn.NMSBoxes(boxes, scores, confidence_thres, iou_thres)
    # Iterate over the selected indices after non-maximum suppression
    for i in indices:
        # Get the box, score, and class ID corresponding to the index
        box = boxes[i]
        score = scores[i]
        class_id = class_ids[i]
        # Draw the detection on the input image
        draw_detections(input_img, box, score, class_id)
    # Return the modified input image
    return input_img
preprocessed_img = preprocess(img)
LRE expects the input to be in contiguous memory.
preprocessed_img = np.ascontiguousarray(preprocessed_img)
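Before handing the array to the runtime, you can optionally verify the layout, dtype, and contiguity in one place. This sanity check assumes the model was compiled for a 640x640 input, matching the preprocess function above:
# Confirm NCHW float32 layout and contiguous memory before inference
assert preprocessed_img.shape == (1, 3, 640, 640)
assert preprocessed_img.dtype == np.float32
assert preprocessed_img.flags["C_CONTIGUOUS"]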
Load the first model and run an inference.
lre_case1 = LRE(f"{case1_output_dir}/modelLibrary.so")
output_case1 = lre_case1(preprocessed_img)
output_case1_torch = torch.from_dlpack(output_case1[0])
Load the second model and run an inference.
lre_case2 = LRE(f"{case2_output_dir}/modelLibrary.so")
output_case2 = lre_case2(preprocessed_img)
output_case2_torch = torch.from_dlpack(output_case2[0])
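The outputs are exchanged through the DLPack protocol, so torch.from_dlpack wraps the runtime's buffer without copying it. For a TensorRT target the tensor may still reside on the GPU, which is why the post-processing calls below move it to the host with .cpu(). If you are unsure where an output landed, you can inspect it:
# Inspect the device and shape of the DLPack-wrapped outputs
print(output_case1_torch.device, output_case1_torch.shape)
print(output_case2_torch.device, output_case2_torch.shape)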
post_processed_img_case1 = postprocess(img.copy(), output_case1_torch.cpu())
post_processed_img_case2 = postprocess(img.copy(), output_case2_torch.cpu())
fig, axs = plt.subplots(1, 2, figsize=(12, 12))
axs[0].imshow(post_processed_img_case1)
axs[0].set_title("Case 1 (FP32)")
axs[0].axis(False)
axs[1].imshow(post_processed_img_case2)
axs[1].set_title("Case 2 (INT8)")
axs[1].axis(False)
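In a notebook the figure renders inline; if you run this as a plain script instead, display or save it explicitly (the filename below is arbitrary):
# Render the comparison, or write it to disk when running headless
plt.tight_layout()
plt.savefig("yolo_comparison.png", bbox_inches="tight")  # or plt.show()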
Speed measurements¶
def speed_test(lre, sample_input, iterations):
    print("==== Speed Testing ====")
    # Warm up TensorRT engines so one-time startup cost is excluded from the timing
    if lre.is_trt:
        lre.warm_up(10)
    t_start = time.time()
    for _ in range(iterations):
        lre.infer(sample_input)
    elapsed_time = time.time() - t_start
    latency = elapsed_time / iterations
    fps = iterations / elapsed_time
    print(f"FPS: {np.round(fps, 2)}; Latency: {np.round(latency * 1000, 2)} ms")
speed_test(lre_case1, preprocessed_img, 2)
speed_test(lre_case2, preprocessed_img, 2)
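Two iterations is only a quick smoke test, and timing noise dominates at counts that small. For a steadier comparison, a larger iteration count (100 here is an arbitrary choice) averages out scheduling jitter:
# More iterations give a more stable latency estimate
speed_test(lre_case1, preprocessed_img, 100)
speed_test(lre_case2, preprocessed_img, 100)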
Case one is the FP32 model, which demonstrates higher detection accuracy, with more precise bounding boxes and higher confidence scores than the INT8 model in case two. The reduced accuracy of the INT8 model is the trade-off for faster inference and a lower computational footprint.