# Install dependencies (run these in a shell, not inside Python):
#   pip install opencv-python==4.1.0.25 -i https://pypi.tuna.tsinghua.edu.cn/simple
#   pip install opencv-contrib-python==4.1.0.25 -i https://pypi.tuna.tsinghua.edu.cn/simple
#   pip install multiprocess
import argparse
import os
import sys
from multiprocessing import Pool

import cv2
import numpy as np
from PIL import Image
# from IPython import embed #to debug
def ToImg(raw_flow, bound):
    '''
    Scale raw optical-flow values to the 0-255 range with a bi-bound clip.

    :param raw_flow: input raw flow values as a numpy array (not in 0-255)
    :param bound: upper and lower bound; values are clipped to [-bound, bound]
    :return: a new array with values linearly rescaled to [0, 255]
    '''
    # Work on a copy so the caller's array is not mutated in place.
    flow = raw_flow.copy()
    flow[flow > bound] = bound
    flow[flow < -bound] = -bound
    # Shift [-bound, bound] -> [0, 2*bound], then rescale to [0, 255].
    flow += bound
    flow *= (255 / float(2 * bound))
    return flow
def save_flows(flows, image, save_dir, num, bound):
    '''
    Save one optical-flow pair and the corresponding raw frame as JPEGs.

    :param flows: array whose last axis holds (flow_x, flow_y)
    :param image: raw frame as read by cv2.VideoCapture
    :param save_dir: destination sub-directory name (always equal to the video id)
    :param num: the save id, which belongs to one of the extracted frames
    :param bound: bi-bound used to rescale the flow into 0-255
    :return: 0
    '''
    # Rescale to 0~255 with the bound setting.
    flow_x = ToImg(flows[..., 0], bound)
    flow_y = ToImg(flows[..., 1], bound)
    # new_dir is a module-level global set in __main__.
    out_dir = os.path.join(new_dir, save_dir)
    # exist_ok avoids a TOCTOU race when several Pool workers hit the same dir.
    os.makedirs(out_dir, exist_ok=True)
    # Save the raw image.
    save_img = os.path.join(out_dir, 'img_{:05d}.jpg'.format(num))
    cv2.imwrite(save_img, image)
    # Save the flows.
    save_x = os.path.join(out_dir, 'flow_x_{:05d}.jpg'.format(num))
    save_y = os.path.join(out_dir, 'flow_y_{:05d}.jpg'.format(num))
    cv2.imwrite(save_x, np.array(flow_x))
    cv2.imwrite(save_y, np.array(flow_y))
    return 0
def dense_flow(augs):
    '''
    Extract TVL1 dense optical flow (and raw frames) for one video.

    :param augs: tuple of the detailed arguments:
        video_path: path of the video file to read
        save_dir: the destination path's final directory name (the video id)
        step: number of frames between each two extracted frames
        bound: bi-bound parameter passed through to save_flows
    :return: no returns
    '''
    video_path, save_dir, step, bound = augs
    # cv2.VideoCapture needs ffmpeg support; skvideo.io.vread is an alternative.
    videocapture = cv2.VideoCapture(video_path)
    if not videocapture.isOpened():
        print('Could not initialize capturing! ', video_path)
        return

    # Create the TVL1 estimator once, not per frame (it is expensive to build).
    dtvl1 = cv2.optflow.DualTVL1OpticalFlow_create()

    frame_num = 0
    prev_gray = None
    num0 = 0

    def _skip_frames():
        # Consume the frames between two extracted frames ("step" spacing).
        nonlocal num0
        step_t = step
        while step_t > 1:
            videocapture.read()
            num0 += 1
            step_t -= 1

    while True:
        suc, frame = videocapture.read()
        if not suc or frame is None:
            # End of video (or read failure): stop instead of crashing in cvtColor.
            break
        num0 += 1
        # cv2 decodes frames as BGR, so BGR2GRAY is the correct conversion.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_num == 0:
            # First frame only seeds prev_gray; there is no pair to compute yet.
            prev_gray = gray
            frame_num += 1
            _skip_frames()
            continue
        flowDTVL1 = dtvl1.calc(prev_gray, gray, None)
        # Save the flow pair and the raw frame.
        save_flows(flowDTVL1, frame, save_dir, frame_num, bound)
        prev_gray = gray
        frame_num += 1
        _skip_frames()

    videocapture.release()
def get_video_list(root=None):
    '''
    Collect the sorted list of video file names under root/<class>/<video>.

    :param root: dataset root containing one sub-directory per class;
                 defaults to the module-level global ``videos_root``.
    :return: (sorted list of video file names, its length)
    '''
    if root is None:
        root = videos_root
    video_list = []
    for cls_name in os.listdir(root):
        cls_path = os.path.join(root, cls_name)
        for video_name in os.listdir(cls_path):
            video_list.append(video_name)
    video_list.sort()
    return video_list, len(video_list)
if __name__ == '__main__':
    # example: if the data path is not set from args, just manually set it as below.
    # dataset='ucf101'
    # data_root='/S2/MI/zqj/video_classification/data'
    # data_root=os.path.join(data_root,dataset)
    videos_root = os.path.join(r'C:\Users\USER\Desktop\test_cpu\xdw_baseline\data\mod-ucf101\videos')
    # Specify the arguments.
    num_workers = 4
    step = 1
    bound = 15
    s_ = 0
    e_ = 13320
    new_dir = r'C:\Users\USER\Desktop\test_cpu\xdw_baseline\data\mod-ucf101' + '/' + 'flow'
    videos_list = []
    flows_dirs = []
    for i in os.listdir(r'C:\Users\USER\Desktop\test_cpu\xdw_baseline\data\mod-ucf101\videos'):
        videos_list.append(r'C:\Users\USER\Desktop\test_cpu\xdw_baseline\data\mod-ucf101\videos' + '/' + i)
        flows_dirs.append(r'C:\Users\USER\Desktop\test_cpu\xdw_baseline\data\mod-ucf101\flow' + '/' + i.split('.')[0])
    # NOTE(review): both min() arguments are identical; presumably one was meant
    # to be e_ - s_ — confirm against the original UCF-101 extraction script.
    len_videos = min(13320 - s_, 13320 - s_)  # if we choose the ucf101
    pool = Pool(num_workers)
    # if mode == 'run':
    #     pool.map(dense_flow, zip(videos_list, flows_dirs,
    #                              [step] * len(videos_list), [bound] * len(videos_list)))
    # else:  # mode == 'debug'
    dense_flow((videos_list[0], flows_dirs[0], step, bound))