diff --git a/common/arguments.py b/common/arguments.py
index a9d4843..eaa47fc 100644
--- a/common/arguments.py
+++ b/common/arguments.py
@@ -45,6 +45,7 @@ def parse_args():
     parser.add_argument('-arc', '--architecture', default='3,3,3', type=str, metavar='LAYERS', help='filter widths separated by comma')
     parser.add_argument('--causal', action='store_true', help='use causal convolutions for real-time processing')
     parser.add_argument('-ch', '--channels', default=1024, type=int, metavar='N', help='number of channels in convolution layers')
+    parser.add_argument('--no_kp_probs', dest='keypoint_probs', help='don\'t use keypoint probabilities', action='store_false')
 
     # Experimental
     parser.add_argument('--subset', default=1, type=float, metavar='FRACTION', help='reduce dataset size by fraction')
diff --git a/run.py b/run.py
index 3f55164..646a8f6 100644
--- a/run.py
+++ b/run.py
@@ -93,7 +93,10 @@
         for cam_idx, kps in enumerate(keypoints[subject][action]):
             # Normalize camera frame
             cam = dataset.cameras()[subject][cam_idx]
-            kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
+            if args.keypoint_probs:
+                kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
+            else:
+                kps = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
             keypoints[subject][action][cam_idx] = kps
 
 subjects_train = args.subjects_train.split(',')
@@ -837,4 +840,4 @@ def run_evaluation(actions, action_filter=None):
         for subject in all_actions_by_subject.keys():
             print('Evaluating on subject', subject)
             run_evaluation(all_actions_by_subject[subject], action_filter)
-            print('')
\ No newline at end of file
+            print('')
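
For context, a minimal sketch (not part of the patch) of what the run.py hunk changes, assuming the 2D detections arrive as (frames, joints, 3) arrays with a per-joint confidence in the last column: on the default path (keypoint_probs=True) only x/y are normalized in place and the confidence column survives, while running with --no_kp_probs keeps only the normalized x/y. normalize_screen_coordinates is re-implemented here to mirror common/camera.py so the snippet is self-contained; shapes, resolutions, and values are illustrative only.

# Illustrative sketch only -- not part of the patch.
import numpy as np

def normalize_screen_coordinates(X, w, h):
    # Mirrors common/camera.py: map [0, w] to [-1, 1] while preserving the aspect ratio.
    assert X.shape[-1] == 2
    return X / w * 2 - [1, h / w]

# Hypothetical 2D detections with a confidence score per joint: (frames, joints, 3).
kps = np.random.rand(4, 17, 3) * [1000, 1000, 1]
res_w, res_h = 1000, 1002

for keypoint_probs in (True, False):   # False corresponds to running with --no_kp_probs
    sample = kps.copy()
    if keypoint_probs:
        # Default path: normalize x/y in place, keep the confidence column.
        sample[..., :2] = normalize_screen_coordinates(sample[..., :2], w=res_w, h=res_h)
    else:
        # --no_kp_probs path: keep only the normalized x/y, dropping confidences.
        sample = normalize_screen_coordinates(sample[..., :2], w=res_w, h=res_h)
    print(keypoint_probs, sample.shape)   # True -> (4, 17, 3), False -> (4, 17, 2)

Note that the --no_kp_probs branch changes the trailing dimension from 3 to 2, so any downstream code that reads a confidence column should only be exercised on the default path.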