import os
import sys
import copy
import shutil

import cv2
import numpy as np

# Make the in-house packages importable.
sys_path = ['/workspace/smlab', '/workspace']
for path in sys_path:
    if path not in sys.path:
        sys.path.append(path)

from smutils.utils_vis import draw_single_bbox_and_label
from smutils.utils_vis import vis_pose_coco_skeleton
from smutils.utils_os import search_file, create_directory
from smutils.utils_data import load_labelmap
from smutils.utils_video import make_video

from smdataset.AnnotationDataManager import AnnotationDataManager
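# Overview: for each annotation json, this script replays the labelled frames,
# runs the PoseC3D action/posture recognizers on per-track pose queues, draws the
# skeletons, boxes, and predicted labels onto every frame, and assembles the
# rendered frames into a result video.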

def make_pose_result(anno_data):
    """Convert raw annotation entries into the pose dicts the recognizers expect."""
    pose_result = []
    for anno in anno_data:
        tmp_data = dict()
        box = copy.deepcopy(anno['bbox'])
        tmp_data['bbox'] = np.array(box)
        tmp_data['keypoints'] = np.array(anno['keypoints'])
        tmp_data['track_id'] = anno['track_id']
        pose_result.append(tmp_data)
    return pose_result

# Drawing styles: action_vis_param for boxes with recognised actions, vis_param
# (name inferred from its use below) for the plain tracking overlay.
action_vis_param = dict(
    box_color=(0, 255, 0),
    txt_color=(255, 255, 255))
vis_param = dict(
    box_color=(0, 255, 0),
    txt_color=(255, 255, 255))

action_cfg_path = '/workspace/InnoTest/models/posec3d_action.py'
posture_cfg_path = '/workspace/InnoTest/models/posec3d_pose.py'
image_base = '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/images'
save_root = '/media/safemotion/HDD5/pjm_test/inno2023_result_select_half'
anno_folder_list = [
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_1',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_kkomo_1',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_kkumjalam_1',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_kkumjalam_hard_1',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_kkomo_2',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_kid_kkumjalam_hard_1',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_2nd_3',
    '/media/safemotion/HDD2/SM_Dataset/action_2022/action_cctv_img/annotations_action_v22_2nd_4',
]

cvt_lower_map = [0, 1, 2, 0, 4, 5, 6, 7, 8, 9, 0, 11]
cvt_upper_map = [0, 0, 0, 3, 4, 0]
cvt_pose_map = [0, 0, 0, 0, 4, 4, 6, 7]
cvt_hand_map = [0, 0, 0, 3]
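# The cvt_*_map lists remap the recognizers' raw class indices onto the coarser
# label sets loaded into `labelmap` below, e.g. a predicted posture index i is
# displayed as labelmap['pose'][cvt_pose_map[i]].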

create_directory(save_root)
# `smrunner` (the model-building helper used here) and `device` are set up in
# lines elided from this excerpt; device is expected to be a torch device string such as 'cuda:0'.
action_model = smrunner.build_model(action_cfg_path).to(device)
posture_model = smrunner.build_model(posture_cfg_path).to(device)

labelmap = dict()
labelmap['action_upper'] = load_labelmap('/workspace/smlab/smaction/datasets/safemotion_v22_upper_action.txt')
labelmap['action_lower'] = load_labelmap('/workspace/smlab/smaction/datasets/safemotion_v22_lower_action.txt')
labelmap['pose'] = load_labelmap('/workspace/smlab/smaction/datasets/safemotion_v22_pose.txt')
labelmap['hand'] = load_labelmap('/workspace/smlab/smaction/datasets/safemotion_v22_hand.txt')

json_path_list = []
for anno_folder in anno_folder_list:
    name_list, path_list = search_file(anno_folder, '.json')
    json_path_list.extend(path_list)
NUM = len(json_path_list)

action_k = video_fps*2
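# Note: video_fps, pose_k, action_sample, and pose_sample are defined in lines
# elided from this excerpt. action_k is the per-track window fed to the action
# recognizer (two seconds of pose history); half of it is also used below as the
# staleness threshold for pruning tracks.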

data_manager = AnnotationDataManager()

for iter_n, json_path in enumerate(json_path_list):
    json_name = json_path.split('/')[-1]
    save_folder = os.path.join(save_root, json_name.split('.')[0])
    save_video_path = os.path.join(save_root, json_name.replace('.json', '.mp4'))
    create_directory(save_folder)

    data_manager.load_annotation(json_path, image_base=image_base)
    data_manager.init_annotation()

    # Walk the annotated images of this clip in order; track_data keeps a pose
    # queue per track id. The per-frame loop header is reconstructed from elided
    # lines (frame_id is assumed to be the data manager's current image index).
    track_data = dict()
    while True:
        frame_id = data_manager.image_idx
        print(f'[{iter_n+1}/{NUM}] {json_name} : {frame_id}', end='\r')

        anno_data = data_manager.get_anno_data_in_image(filter=True)
        pose_result = make_pose_result(anno_data)

        image = data_manager.read_image()
        vis_img = image.copy()
        vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
        # Append this frame's detections to each track's pose queue.
        for pose in pose_result:
            track_id = int(pose['track_id'])
            if track_id not in track_data:
                track_data[track_id] = []
            pose['frame_id'] = frame_id
            track_data[track_id].append(pose)

        vis_img = vis_pose_coco_skeleton(vis_img, pose_result)

        # delete_list collects stale tracks; txt_pos_y (the y position of the
        # overlay text) is initialised in elided lines.
        delete_list = []
        for track_id, pose_q in track_data.items():
            bbox = pose_q[-1]['bbox']
            last_frame = pose_q[-1]['frame_id']
            if last_frame == frame_id:
                label = f'{track_id:3d}'
                vis_img = draw_single_bbox_and_label(vis_img, bbox, label, **vis_param)

            # Mark tracks that have not been updated for half the action window.
            if frame_id - last_frame > action_k*0.5:
                delete_list.append(track_id)
            # Skip recognition for tracks without recent detections (the bodies
            # of this and the next guard are elided; `continue` is assumed).
            if frame_id - last_frame > 15:
                continue

            action_result = action_model.run_recognizer(pose_q, action_k, action_sample, device=device)
            if action_result is None:
                continue

            posture_result = posture_model.run_recognizer(pose_q[-pose_k:], pose_k, pose_sample, False, device=device)

            # Only draw predictions once the track has at least action_k frames of history.
            if last_frame - pose_q[0]['frame_id'] >= action_k:
                pose_label = cvt_pose_map[posture_result['pred_pose']]
                upper_label = cvt_upper_map[action_result['pred_action_upper']]
                lower_label = cvt_lower_map[action_result['pred_action_lower']]
                hand_label = cvt_hand_map[posture_result['pred_hand']]
                # Compose the overlay text (some guard lines between these
                # appends are elided in this excerpt).
                label = f"{track_id:3d}: {labelmap['pose'][pose_label]}"
                label += f"/{labelmap['hand'][hand_label]}"
                label += f"/{labelmap['action_upper'][upper_label]}"
                label += f"/{labelmap['action_lower'][lower_label]}"

                # Draw the text with a thick dark outline, then the coloured fill.
                cv2.putText(vis_img, label, (10, txt_pos_y), cv2.FONT_HERSHEY_DUPLEX, 1.5, (0, 0, 0), 10, 1)
                cv2.putText(vis_img, label, (10, txt_pos_y), cv2.FONT_HERSHEY_DUPLEX, 1.5, (0, 255, 255), 2, 1)

            vis_img = draw_single_bbox_and_label(vis_img, bbox, f'{track_id:3d}', **action_vis_param)
        # Drop stale tracks.
        for track_id in delete_list:
            del track_data[track_id]

        # Stop at the last annotated image, otherwise advance to the next one
        # (the loop-exit body is elided in this excerpt and assumed to be a break).
        if data_manager.get_number_of_image() <= (data_manager.image_idx+1):
            break
        data_manager.move_image_right()

        # Save the rendered frame.
        name = f'{frame_id:06d}.jpg'
        save_path = os.path.join(save_folder, name)
        cv2.imwrite(save_path, vis_img)

    # Assemble the per-frame images into a video, then remove the frame folder.
    make_video(save_folder, save_video_path, fps=video_fps, half=True)
    shutil.rmtree(save_folder)
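# make_video stitches the saved {frame_id:06d}.jpg frames into save_video_path at
# video_fps; the `half` flag is forwarded to smutils.utils_video.make_video and
# its exact effect depends on that helper.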