相比YOLOv8,v10不再使用NMS,预测过程也变得简单。
import onnxruntime as ort
import matplotlib.pyplot as plt
import numpy as np
import cv2
# Load the exported YOLOv10 ONNX model for CPU-only inference.
providers = ["CPUExecutionProvider"]
onnx_model = ort.InferenceSession("model_path", providers=providers)
def ratioresize(im, color=255, new_shape=(640, 640)):
    """Letterbox-resize *im* into a `new_shape` canvas, preserving aspect ratio.

    The image is scaled by a single ratio so it fits inside `new_shape`,
    then placed in the TOP-LEFT corner of a canvas filled with `color`.
    Because there is no centering offset, predictions only need to be
    multiplied by the returned gain (no shift) to map back to the original.

    Args:
        im: H x W x 3 uint8 image (numpy array).
        color: fill value for the padded border (default 255, white).
        new_shape: (height, width) of the output canvas. Defaults to
            (640, 640), the input size of the YOLOv10 export used below.

    Returns:
        (padded_img, gain): the letterboxed uint8 image and `1 / r`, the
        factor that rescales model-space coordinates to the original image.
    """
    shape = im.shape[:2]  # (h, w) of the source image
    new_h, new_w = new_shape
    padded_img = np.ones((new_h, new_w, 3), dtype=np.uint8) * color
    # One scale factor for both axes keeps the aspect ratio; min() guarantees
    # the resized image fits inside the canvas.
    r = min(new_h / shape[0], new_w / shape[1])
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
    if shape[::-1] != new_unpad:  # skip the resize when already the right size
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    padded_img[: new_unpad[1], : new_unpad[0]] = im
    # ONNX Runtime expects a contiguous buffer.
    padded_img = np.ascontiguousarray(padded_img)
    return padded_img, 1 / r
模型预测,结果形状为[1,300,6],每一行前4位为候选框坐标xyxy,第五位为conf,第六位为类别,结果已经按照conf由大到小排列。
# --- Inference -------------------------------------------------------------
# Read the image and convert OpenCV's BGR order to RGB (for the model and
# for matplotlib display).
image = cv2.imread("image_path")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Letterbox to the model input size; `scale` maps predictions back to the
# original image coordinates.
in_img, scale = ratioresize(image)
# NOTE(review): this normalizes inputs to [-0.5, 0.5]; most YOLO exports use
# plain /255.0 with no offset — confirm against the model's preprocessing.
in_img = in_img / 255.0 - 0.5
in_img = np.transpose(in_img, (2, 0, 1))                     # HWC -> CHW
in_img = np.expand_dims(in_img, axis=0).astype(np.float32)   # add batch dim

# YOLOv10 is NMS-free: output[0] has shape [1, 300, 6], each row being
# [x1, y1, x2, y2, conf, class], already sorted by conf descending.
output = onnx_model.run(None, {'images': in_img})
result = output[0][0]

# Confidence threshold.
boxes = result[result[:, 4] > 0.25]
# BUGFIX: rescale ONLY the coordinate columns. The original multiplied the
# whole row by `scale`, corrupting the confidence and class-id columns
# (and truncating conf to 0 via the int32 cast).
coords = (boxes[:, :4] * scale).astype(np.int32)

for x1, y1, x2, y2 in coords:
    cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 3)
plt.imshow(image)
plt.show()  # required to actually display when run as a script