Describe the bug

When converting ConvNeXt-V2 to ONNX, I hit the error: `Unsupported nms type: soft_nms.` The latest mmcv NMS types include `soft_nms`, but the current mmdeploy does not support this NMS type.
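For context, the `soft_nms` value comes from the model's test-time config rather than from mmdeploy itself. A hypothetical excerpt of the relevant block (field names follow the usual mmdet Mask R-CNN layout; the thresholds match the values visible in the traceback below):

```python
# Hypothetical excerpt of the ConvNeXt-V2 Mask R-CNN test config; the
# nms type set here is what mmdeploy reads and then fails to dispatch on.
model = dict(
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='soft_nms', iou_threshold=0.5),
            max_per_img=100)))
```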
Reproduction

```python
from mmdeploy.apis import torch2onnx

img = '/mmdetection/demo/demo.jpg'
work_dir = '/work/'
deploy_cfg = '/mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
device = 'cpu'
save_file = 'ConvNeXt-V2.onnx'
model_cfg = '/mmdetection/projects/ConvNeXt-V2/configs/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco.py'
model_checkpoint = '/checkpoints/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco_20230113_110947-757ee2dd.pth'

torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint,
           device)
```
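One possible stopgap until mmdeploy supports `soft_nms` is to switch the test-time NMS back to plain `nms` before export. This is an untested sketch: it assumes `torch2onnx` accepts an in-memory `mmengine` `Config` for `model_cfg`, and note that falling back to hard NMS changes inference behaviour relative to soft-NMS:

```python
from mmengine.config import Config

from mmdeploy.apis import torch2onnx

cfg = Config.fromfile('/mmdetection/projects/ConvNeXt-V2/configs/'
                      'mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco.py')
# Swap soft_nms for plain nms in the RCNN test stage so the export can
# take the supported NonMaxSuppression path. This alters test-time
# behaviour: overlapping boxes are suppressed instead of score-decayed.
cfg.model.test_cfg.rcnn.nms = dict(type='nms', iou_threshold=0.5)

torch2onnx(
    '/mmdetection/demo/demo.jpg',
    '/work/',
    'ConvNeXt-V2.onnx',
    '/mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py',
    cfg,
    '/checkpoints/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco_20230113_110947-757ee2dd.pth',
    device='cpu')
```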
Environment

NA
Error traceback

```
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
mmdeploy/apis/core/pipeline_manager.py:356: in _wrap
    return self.call_function(func_name_, *args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:326: in call_function
    return self.call_function_local(func_name, *args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:275: in call_function_local
    return pipe_caller(*args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:107: in __call__
    ret = func(*args, **kwargs)
mmdeploy/apis/pytorch2onnx.py:111: in torch2onnx
    optimize=optimize)
mmdeploy/apis/core/pipeline_manager.py:356: in _wrap
    return self.call_function(func_name_, *args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:326: in call_function
    return self.call_function_local(func_name, *args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:275: in call_function_local
    return pipe_caller(*args, **kwargs)
mmdeploy/apis/core/pipeline_manager.py:107: in __call__
    ret = func(*args, **kwargs)
mmdeploy/apis/onnx/export.py:148: in export
    verbose=verbose)
/opt/conda/lib/python3.7/site-packages/torch/onnx/__init__.py:280: in export
    custom_opsets, enable_onnx_checker, use_external_data_format)
/opt/conda/lib/python3.7/site-packages/torch/onnx/utils.py:94: in export
    use_external_data_format=use_external_data_format)
/opt/conda/lib/python3.7/site-packages/torch/onnx/utils.py:695: in _export
    dynamic_axes=dynamic_axes)
mmdeploy/apis/onnx/optimizer.py:27: in model_to_graph__custom_optimizer
    graph, params_dict, torch_out = ctx.origin_func(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/torch/onnx/utils.py:459: in _model_to_graph
    _retain_param_name)
/opt/conda/lib/python3.7/site-packages/torch/onnx/utils.py:422: in _create_jit_graph
    graph, torch_out = _trace_and_get_graph_from_model(model, args)
/opt/conda/lib/python3.7/site-packages/torch/onnx/utils.py:373: in _trace_and_get_graph_from_model
    torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
/opt/conda/lib/python3.7/site-packages/torch/jit/_trace.py:1160: in _get_trace_graph
    outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/lib/python3.7/site-packages/torch/jit/_trace.py:132: in forward
    self._force_outplace,
/opt/conda/lib/python3.7/site-packages/torch/jit/_trace.py:118: in wrapper
    outs.append(self.inner(*trace_inputs))
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py:1051: in _call_impl
    return forward_call(*input, **kwargs)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py:1039: in _slow_forward
    result = self.forward(*input, **kwargs)
mmdeploy/apis/onnx/export.py:123: in wrapper
    return forward(*arg, **kwargs)
mmdeploy/codebase/mmdet/models/detectors/two_stage.py:91: in two_stage_detector__forward
    x, rpn_results_list, data_samples, rescale=False)
/mmdetection/mmdet/models/roi_heads/base_roi_head.py:123: in predict
    rescale=bbox_rescale)
mmdeploy/codebase/mmdet/models/roi_heads/standard_roi_head.py:68: in standard_roi_head__predict_bbox
    rescale=rescale)
mmdeploy/codebase/mmdet/models/roi_heads/bbox_head.py:145: in bbox_head__predict_by_feat
    keep_top_k=keep_top_k)
mmdeploy/core/optimizers/function_marker.py:266: in g
    rets = f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

boxes = tensor([[[415.9427, 337.5312, 849.6290, 721.6528],
         [900.3839, 207.7246, 978.8644, 245.0121],
         [357.08...960, 282.0446],
         [403.1835, 317.6036, 859.9055, 727.7901],
         [  0.0000,   0.0000,   0.0000,   0.0000]]])
scores = tensor([[[6.0523e-06, 1.5483e-05, 1.0588e-05, ..., 1.1332e-09, 5.3504e-14, 9.9225e-11],
         [7.6693e-0...e-12, 9.5376e-10],
         [1.3522e-04, 1.6717e-06, 3.5402e-05, ..., 6.7683e-07, 1.2107e-08, 5.0770e-07]]])
max_output_boxes_per_class = 200, iou_threshold = 0.5, score_threshold = 0.05
pre_top_k = 5000, keep_top_k = 100
output_index = False, nms_type = 'soft_nms'

    @mark(
        'multiclass_nms',
        inputs=['boxes', 'scores'],
        outputs=['dets', 'labels', 'index'])
    def multiclass_nms(boxes: Tensor,
                       scores: Tensor,
                       max_output_boxes_per_class: int = 1000,
                       iou_threshold: float = 0.5,
                       score_threshold: float = 0.05,
                       pre_top_k: int = -1,
                       keep_top_k: int = -1,
                       output_index: bool = False,
                       nms_type='nms'):
        """Apis for multiclass nms."""
        if nms_type == 'nms':
            return _multiclass_nms(
                boxes,
                scores,
                max_output_boxes_per_class=max_output_boxes_per_class,
                iou_threshold=iou_threshold,
                score_threshold=score_threshold,
                pre_top_k=pre_top_k,
                keep_top_k=keep_top_k,
                output_index=output_index)
        elif nms_type == 'nms_rotated':
            return multiclass_nms_rotated(
                boxes,
                scores,
                max_output_boxes_per_class=max_output_boxes_per_class,
                iou_threshold=iou_threshold,
                score_threshold=score_threshold,
                pre_top_k=pre_top_k,
                keep_top_k=keep_top_k)
        elif nms_type == 'nms_match':
            return multiclass_nms_match(
                boxes,
                scores,
                max_output_boxes_per_class=max_output_boxes_per_class,
                iou_threshold=iou_threshold,
                score_threshold=score_threshold,
                pre_top_k=pre_top_k,
                keep_top_k=keep_top_k)
        else:
>           raise NotImplementedError(f'Unsupported nms type: {nms_type}.')
E           NotImplementedError: Unsupported nms type: soft_nms.

mmdeploy/mmcv/ops/nms.py:545: NotImplementedError
```
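If editing mmdeploy locally is acceptable, another option is to add a `soft_nms` branch to the dispatch shown above in `mmdeploy/mmcv/ops/nms.py`. This is a hypothetical local patch, not upstream mmdeploy behaviour: ONNX's `NonMaxSuppression` operator cannot decay scores, so the branch merely approximates soft-NMS with the existing hard-NMS export path:

```python
    elif nms_type == 'soft_nms':
        # Hypothetical local fallback: approximate soft-NMS with the
        # standard hard-NMS export path. Overlapping boxes are dropped
        # outright rather than having their scores decayed, so exported
        # results will differ from true soft-NMS inference.
        return _multiclass_nms(
            boxes,
            scores,
            max_output_boxes_per_class=max_output_boxes_per_class,
            iou_threshold=iou_threshold,
            score_threshold=score_threshold,
            pre_top_k=pre_top_k,
            keep_top_k=keep_top_k,
            output_index=output_index)
```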