Computer vision YOLO11 model
Test_Realtime.py | 94 lines | Normal file
@@ -0,0 +1,94 @@
import cv2
from ultralytics import YOLO

# Camera index (default camera is 0)
camera_index = 0

# Load the YOLO model
model = YOLO(r"D:\AIM\lemon\runs\detect\train4\weights\best.pt")  # Load custom model

# Initialize the camera
cap = cv2.VideoCapture(camera_index)
if not cap.isOpened():
    print("Unable to open the camera. Please check the device.")
    exit()

fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Camera resolution: {width}x{height}, FPS: {fps} FPS")

# Dictionary to track the state of each lemon
lemon_states = {}  # Format: {ID: "State"}

# Define class labels
class_labels = {
    0: "Bruised",
    1: "DefectiveLemon",
    2: "GoodLemon",
    3: "NotRipeLemon",
    4: "Rotten"
}

# Classes that require ID assignment
id_tracked_classes = ["DefectiveLemon", "GoodLemon", "NotRipeLemon"]

# Set the window to be resizable
cv2.namedWindow("Live Detection", cv2.WINDOW_NORMAL)

# Process video stream in real-time
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("Unable to read camera input. Terminating program.")
        break

    # Perform object tracking using BoT-SORT; persist=True keeps track IDs
    # consistent across consecutive frames when track() is called frame by frame
    results = model.track(source=frame, conf=0.5, tracker='botsort.yaml',
                          persist=True, show=False)

    for result in results:
        frame = result.orig_img          # Current frame
        detections = result.boxes        # Detection box information

        for box in detections:
            x1, y1, x2, y2 = map(int, box.xyxy[0])              # Detection box coordinates
            obj_id = int(box.id) if box.id is not None else -1   # Tracking object ID
            class_id = int(box.cls)                               # Class ID
            score = float(box.conf)                               # Confidence score
            label = class_labels.get(class_id, "Unknown")         # Get class label

            # Update lemon state and output information for tracked boxes
            if obj_id != -1 and label in id_tracked_classes:
                if obj_id not in lemon_states:
                    lemon_states[obj_id] = label
                else:
                    # Once detected as "DefectiveLemon", the state remains "DefectiveLemon"
                    if lemon_states[obj_id] != "DefectiveLemon":
                        lemon_states[obj_id] = label

                # Output ID, position, and label
                position = f"({x1}, {y1}, {x2}, {y2})"
                print(f"ID: {obj_id}, Position: {position}, Label: {lemon_states[obj_id]}")

            # Draw detection boxes and labels (including untracked ones)
            if obj_id != -1 and label in id_tracked_classes:
                display_text = f"ID {obj_id} | {lemon_states[obj_id]}"
            else:
                display_text = label  # For untracked labels, only show the class

            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, display_text, (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # Display the processed video stream
    cv2.imshow("Live Detection", frame)

    # Exit the loop when ESC key is pressed
    if cv2.waitKey(1) & 0xFF == 27:  # 27 is the ASCII value for the ESC key
        print("ESC key detected. Exiting the program.")
        break

# Release resources
cap.release()
cv2.destroyAllWindows()
print("Camera video processing complete. Program terminated.")