利用AidLux实现电力目标检测与实时锁定演示-lesson4文章

基于yolov8实现目标检测功能,结合sort算法赋予id,实现电力目标锁定功能。

部分代码如下:

# aidlux相关

from cvs import *

import aidlite_gpu

from utils import detect_postprocess, preprocess_img, draw_detect_res, scale_boxes

import time

import cv2

import os

import numpy as np

import glob

import argparse

from filterpy.kalman import KalmanFilter

np.random.seed(0)

def linear_assignment(cost_matrix):
    """
    Solve the linear assignment problem for the given cost matrix.

    Uses the fast `lap` package (Jonker-Volgenant) when it is installed and
    falls back to scipy's Hungarian implementation otherwise. Returns an
    (n, 2) integer array of [row, column] index pairs.
    """
    try:
        import lap
        _, row_to_col, col_to_row = lap.lapjv(cost_matrix, extend_cost=True)
        # Keep only valid assignments (index >= 0) as [row, col] pairs.
        return np.array([[col_to_row[j], j] for j in row_to_col if j >= 0])
    except ImportError:
        # `lap` is optional; scipy provides the same optimal assignment.
        from scipy.optimize import linear_sum_assignment
        row_idx, col_idx = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(row_idx, col_idx)))

def iou_batch(bb_test, bb_gt):
    """
    From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2].

    Broadcasts the two box sets against each other and returns a matrix of
    shape (len(bb_test), len(bb_gt)) holding all pairwise IOU values.
    """
    gt = np.expand_dims(bb_gt, 0)     # shape (1, M, 4)
    det = np.expand_dims(bb_test, 1)  # shape (N, 1, 4)

    # Intersection rectangle for every (det, gt) pair via broadcasting.
    inter_x1 = np.maximum(det[..., 0], gt[..., 0])
    inter_y1 = np.maximum(det[..., 1], gt[..., 1])
    inter_x2 = np.minimum(det[..., 2], gt[..., 2])
    inter_y2 = np.minimum(det[..., 3], gt[..., 3])
    inter_w = np.maximum(0., inter_x2 - inter_x1)
    inter_h = np.maximum(0., inter_y2 - inter_y1)
    intersection = inter_w * inter_h

    det_area = (det[..., 2] - det[..., 0]) * (det[..., 3] - det[..., 1])
    gt_area = (gt[..., 2] - gt[..., 0]) * (gt[..., 3] - gt[..., 1])

    # IOU = intersection / union.
    return intersection / (det_area + gt_area - intersection)

def convert_bbox_to_z(bbox):
    """
    Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
    [x,y,s,r] where x,y is the centre of the box and s is the scale/area and
    r is the aspect ratio.

    Returned as a (4, 1) column vector suitable as a Kalman measurement.
    """
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    centre_x = bbox[0] + width / 2.
    centre_y = bbox[1] + height / 2.
    area = width * height          # scale is just the box area
    ratio = width / float(height)  # aspect ratio w/h
    return np.array([centre_x, centre_y, area, ratio]).reshape((4, 1))

def convert_x_to_bbox(x, score=None):
    """
    Takes a bounding box in the centre form [x, y, s, r] and returns it in the
    form [x1,y1,x2,y2] where x1, y1 is the top left and x2, y2 is the bottom
    right.

    Parameters:
        x     : array-like whose first four entries are centre x, centre y,
                scale (area) s and aspect ratio r.
        score : optional confidence; when given it is appended as a fifth column.

    Returns:
        np.ndarray of shape (1, 4), or (1, 5) when `score` is provided.
    """
    w = np.sqrt(x[2] * x[3])  # s = w*h and r = w/h  =>  w = sqrt(s*r)
    h = x[2] / w              # h = s / w
    # BUGFIX: test None with `is`, not `==`. When `score` is a numpy value,
    # `score == None` performs an elementwise comparison and is unreliable
    # (and deprecated) as a None check.
    if score is None:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4))
    else:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5))

class KalmanBoxTracker(object):
    """
    This class represents the internal state of individual tracked objects observed as bbox.
    """
    # Class-level counter used to hand every new track a unique id.
    count = 0
    def __init__(self,bbox):
        """
        Initialises a tracker using initial bounding box.

        bbox: box as [x1, y1, x2, y2, ...]; only the first four values are used.
        """
        #define constant velocity model
        # State vector x = [cx, cy, s, r, v_cx, v_cy, v_s]: box centre, scale
        # (area) and aspect ratio plus velocities for all but the aspect
        # ratio, which is modelled as constant (no velocity row for r in F).
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        # F: constant-velocity state transition; H: we observe [cx, cy, s, r] only.
        self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
        self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
        self.kf.R[2:,2:] *= 10.  # trust the scale/ratio measurements less than the centre
        self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
        self.kf.P *= 10.
        self.kf.Q[-1,-1] *= 0.01  # very small process noise on the scale velocity
        self.kf.Q[4:,4:] *= 0.01  # small process noise on all velocity terms
        self.kf.x[:4] = convert_bbox_to_z(bbox)  # seed state with the first observation
        self.time_since_update = 0  # frames elapsed since the last matched detection
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []    # predicted boxes accumulated since the last update
        self.hits = 0        # total number of matched detections
        self.hit_streak = 0  # consecutive frames with a matched detection
        self.age = 0         # total number of predict() calls
    def update(self,bbox):
        """
        Updates the state vector with observed bbox.
        """
        self.time_since_update = 0
        self.history = []  # a fresh observation invalidates the prediction history
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))
    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate.
        """
        # If the predicted scale (area) would become non-positive, zero the
        # scale velocity so the box cannot collapse to a negative area.
        if((self.kf.x[6]+self.kf.x[2])<=0):
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if(self.time_since_update>0):
            self.hit_streak = 0  # a missed frame breaks the hit streak
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1]
    def get_state(self):
        """
        Returns the current bounding box estimate.
        """
        return convert_x_to_bbox(self.kf.x)

def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
    """
    Assigns detections to tracked object (both represented as bounding boxes).

    Returns three arrays: matches as (n, 2) rows of [detection_idx,
    tracker_idx], plus the indices of unmatched detections and unmatched
    trackers.
    """
    # No trackers yet: every detection is unmatched.
    if len(trackers) == 0:
        return (np.empty((0, 2), dtype=int),
                np.arange(len(detections)),
                np.empty((0, 5), dtype=int))

    iou_matrix = iou_batch(detections, trackers)

    if min(iou_matrix.shape) > 0:
        above_thresh = (iou_matrix > iou_threshold).astype(np.int32)
        # If thresholding already yields a one-to-one mapping, skip the
        # optimal assignment and use it directly.
        if above_thresh.sum(1).max() == 1 and above_thresh.sum(0).max() == 1:
            matched_indices = np.stack(np.where(above_thresh), axis=1)
        else:
            # Maximise total IOU == minimise its negation.
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = [d for d in range(len(detections))
                            if d not in matched_indices[:, 0]]
    unmatched_trackers = [t for t in range(len(trackers))
                          if t not in matched_indices[:, 1]]

    # Discard assigned pairs whose overlap is still below the threshold.
    matches = []
    for pair in matched_indices:
        if iou_matrix[pair[0], pair[1]] < iou_threshold:
            unmatched_detections.append(pair[0])
            unmatched_trackers.append(pair[1])
        else:
            matches.append(pair.reshape(1, 2))

    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)

class Sort(object):
    """
    SORT (Simple Online and Realtime Tracking) multi-object tracker.

    Maintains a list of KalmanBoxTracker instances; each call to update()
    runs one predict -> associate -> update cycle against the new detections.
    """
    def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
        """
        Sets key parameters for SORT
        """
        self.max_age = max_age  # a track is removed once time_since_update > max_age
        self.min_hits = min_hits  # consecutive hits required before a track is reported
        self.iou_threshold = iou_threshold  # minimum IOU for a valid detection/track match
        self.trackers = []  # live KalmanBoxTracker instances
        self.frame_count = 0  # frames processed so far
    def update(self, dets=np.empty((0, 5))):
        """
        Params:
        dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
        Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
        Returns the a similar array, where the last column is the object ID.
        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        self.frame_count += 1
        # get predicted locations from existing trackers.
        trks = np.zeros((len(self.trackers), 5))
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            # predict() advances the Kalman state; row 0 is [x1, y1, x2, y2].
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)  # mark numerically-diverged tracks for removal
        # Drop rows containing NaN/inf so association only sees valid boxes.
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        # Pop in reverse so the remaining indices stay valid while deleting.
        for t in reversed(to_del):
            self.trackers.pop(t)
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
        # update matched trackers with assigned detections
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :])
        # create and initialize new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(dets[i,:])
            self.trackers.append(trk)
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            d = trk.get_state()[0]
            # Report a track only if it was matched this frame AND is either
            # past its probation period (hit_streak >= min_hits) or the
            # sequence is still within its first min_hits frames.
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
            i -= 1
            # remove dead tracklet
            if(trk.time_since_update > self.max_age):
                self.trackers.pop(i)
        if(len(ret)>0):
            return np.concatenate(ret)
        return np.empty((0,5))

if __name__ == '__main__':
    # Create the SORT tracker instance.
    mot_tracker = Sort(max_age = 1,  # a track is dropped once time_since_update > max_age
                      min_hits = 3,  # a track is reported once hit_streak >= min_hits
                      iou_threshold = 0.3) # create instance of the SORT tracker
    # Path to the TFLite model file
    model_path = '/home/yolov8/models/8086_best_float32.tflite'
    # Define the input/output buffer sizes (element count * 4 bytes per float32)
    in_shape = [1 * 640 * 640 * 3 * 4]  # HWC, float32
    out_shape = [1 * 8400 * 52 * 4]  # 8400: total cells, 52 = 48(num_classes) + 4(xywh), float32
    # AidLite initialisation
    aidlite = aidlite_gpu.aidlite()
    # Load the model (return code printed below)
    res = aidlite.ANNModel(model_path, in_shape, out_shape, 4, 0)
    print(res)
    ''' 读取手机后置摄像头 '''
    # Open the phone's rear camera.
    cap = cvs.VideoCapture(0)
    frame_id = 0
    while True:
        frame = cap.read()
        if frame is None:
            continue
        frame_id += 1
        # Process only every third frame to keep the demo real-time.
        if frame_id % 3 != 0:
            continue
        time0 = time.time()
        # Preprocess: resize to 640x640 and scale pixel values by 1/255.
        img = preprocess_img(frame, target_shape=(640, 640), div_num=255, means=None, stds=None)
        aidlite.setInput_Float32(img, 640, 640)
        # Run inference
        aidlite.invoke()
        preds = aidlite.getOutput_Float32(0)
        preds = preds.reshape(1, 52, 8400)
        preds = detect_postprocess(preds, frame.shape, [640, 640, 3], conf_thres=0.25, iou_thres=0.45)
        print('1 batch takes {} s'.format(time.time() - time0))
        if len(preds) != 0:
            # Map the boxes from 640x640 network space back to frame coordinates.
            preds[:, :4] = scale_boxes([640, 640], preds[:, :4], frame.shape)
            ''' SORT锁定 '''
            preds_out = preds[:, :5]  # slice to rows of the form [x1, y1, x2, y2, conf]
            trackers = mot_tracker.update(preds_out)  # predict -> associate -> update
            ''' 绘制结果 '''
            # Draw the track id (column 4) and the bounding box for every track.
            for d in trackers:
                cv2.putText(frame, str(int(d[4])), (int(d[0]), int(d[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
                cv2.rectangle(frame, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (0, 0, 255),thickness = 2) 
        cvs.imshow(frame)

本视频为基于AidLux的演示视频,在安卓端实现yolov8实时检测以及sort电力目标锁定功能。

点击下方演示视频即可观看。

演示视频

©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 221,635评论 6 515
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 94,543评论 3 399
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 168,083评论 0 360
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 59,640评论 1 296
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 68,640评论 6 397
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 52,262评论 1 308
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 40,833评论 3 421
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 39,736评论 0 276
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 46,280评论 1 319
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 38,369评论 3 340
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 40,503评论 1 352
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 36,185评论 5 350
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 41,870评论 3 333
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 32,340评论 0 24
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 33,460评论 1 272
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 48,909评论 3 376
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 45,512评论 2 359

推荐阅读更多精彩内容