8134 Add unit test for responsive inference #8146

Merged
merged 23 commits on Nov 24, 2024
Changes from 16 commits
Commits (23)
42a45e0
Merge pull request #19 from Project-MONAI/master
Nic-Ma Feb 1, 2021
cd16a13
Merge pull request #32 from Project-MONAI/master
Nic-Ma Feb 24, 2021
6f87afd
Merge pull request #180 from Project-MONAI/dev
Nic-Ma Jul 22, 2021
f398298
Merge pull request #214 from Project-MONAI/dev
Nic-Ma Sep 8, 2021
ec463d6
Merge pull request #397 from Project-MONAI/dev
Nic-Ma Apr 4, 2022
ca62306
Merge pull request #429 from Project-MONAI/dev
Nic-Ma Jul 8, 2022
6b63f3e
Merge branch 'Project-MONAI:main' into main
Nic-Ma Jan 11, 2023
06b28e5
Merge pull request #472 from Project-MONAI/dev
Nic-Ma Oct 12, 2024
e8770af
Add realtime inference test
Nic-Ma Oct 12, 2024
61bfb63
[DLMED] Add dataflow
Nic-Ma Oct 15, 2024
0b61b54
[DLMED] add realtime inference config
Nic-Ma Oct 15, 2024
3f3a8d7
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Oct 15, 2024
5797465
Merge branch 'dev' into 8134-add-realtime-infer
Nic-Ma Oct 15, 2024
a4c8095
Merge branch 'dev' into 8134-add-realtime-infer
KumoLiu Nov 21, 2024
05fb8cf
mv to responsive_inference.json and format fix
KumoLiu Nov 21, 2024
02a84b9
rename config
KumoLiu Nov 21, 2024
c03b710
Update monai/bundle/workflows.py
KumoLiu Nov 22, 2024
de52a91
Merge branch 'dev' into 8134-add-realtime-infer
KumoLiu Nov 22, 2024
d3a2f4f
Merge remote-tracking branch 'Nic/8134-add-realtime-infer' into 8134-…
KumoLiu Nov 22, 2024
cbffae9
fix ci
KumoLiu Nov 22, 2024
9ed0078
Update monai/bundle/workflows.py
KumoLiu Nov 22, 2024
38dc4f0
Merge remote-tracking branch 'Nic/8134-add-realtime-infer' into 8134-…
KumoLiu Nov 22, 2024
faeff4a
fix format
KumoLiu Nov 22, 2024
10 changes: 10 additions & 0 deletions monai/bundle/reference_resolver.py
@@ -192,6 +192,16 @@ def get_resolved_content(self, id: str, **kwargs: Any) -> ConfigExpression | str
"""
return self._resolve_one_item(id=id, **kwargs)

def remove_resolved_content(self, id: str) -> Any | None:
"""
Remove the resolved ``ConfigItem`` by id.

Args:
id: id name of the expected item.

"""
return self.resolved_content.pop(id) if id in self.resolved_content else None

@classmethod
def normalize_id(cls, id: str | int) -> str:
"""
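A quick illustration of what the new remove_resolved_content helper enables: the resolver caches each parsed item, so evicting the cached entry forces the expression to be re-evaluated on the next lookup. This is a minimal sketch; the "greeting" id is made up for illustration, while ConfigParser, get_parsed_content and the ref_resolver attribute are the existing bundle API used in this PR.

from monai.bundle import ConfigParser

# hypothetical one-item config holding a Python expression
parser = ConfigParser({"greeting": "$'hello ' + 'world'"})

print(parser.get_parsed_content("greeting"))              # evaluates the expression and caches the result
parser.ref_resolver.remove_resolved_content("greeting")   # evict the cached result (new helper above)
print(parser.get_parsed_content("greeting"))              # the expression is evaluated again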
11 changes: 9 additions & 2 deletions monai/bundle/workflows.py
@@ -394,8 +394,15 @@ def check_properties(self) -> list[str] | None:
ret.extend(wrong_props)
return ret

def _run_expr(self, id: str, **kwargs: dict) -> Any:
return self.parser.get_parsed_content(id, **kwargs) if id in self.parser else None
def _run_expr(self, id: str, **kwargs: dict) -> list[Any]:
ret = []
if id in self.parser:
# the expressions are expected to be in a list; run each one, then drop its resolved content so it can run again
for i in range(len(self.parser[id])):
sub_id = f"{id}{ID_SEP_KEY}{i}"
ret.append(self.parser.get_parsed_content(sub_id, **kwargs))
self.parser.ref_resolver.remove_resolved_content(sub_id)
return ret

def _get_prop_id(self, name: str, property: dict) -> Any:
prop_id = property[BundlePropertyConfig.ID]
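The reworked _run_expr assumes the target id (here "run") refers to a list of expressions: each element is resolved through an indexed sub-id ("run::0", "run::1", ... using the bundle id separator) and its cached result is removed right away, so a later run() call re-executes the expressions instead of returning stale values. A self-contained sketch of that pattern, with a toy cache standing in for the reference resolver (illustrative only, not the MONAI implementation):

ID_SEP_KEY = "::"  # MONAI joins nested config ids with "::"

expressions = {"run": [lambda: print("evaluator.run()"), lambda: print("dataflow.update(...)")]}
cache = {}  # maps sub_id -> resolved result

def run_exprs(section: str) -> list:
    results = []
    if section in expressions:
        for i, expr in enumerate(expressions[section]):
            sub_id = f"{section}{ID_SEP_KEY}{i}"
            if sub_id not in cache:       # resolve the sub-item (here: just call the lambda)
                cache[sub_id] = expr()
            results.append(cache[sub_id])
            cache.pop(sub_id, None)       # evict so a later call re-runs the expression
    return results

run_exprs("run")  # runs both expressions
run_exprs("run")  # runs them again, because the cached results were removed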
35 changes: 34 additions & 1 deletion tests/test_bundle_workflow.py
@@ -26,7 +26,7 @@
from monai.data import Dataset
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.networks.nets import UNet
from monai.transforms import Compose, LoadImage
from monai.transforms import Compose, LoadImage, LoadImaged, SaveImaged
from tests.nonconfig_workflow import NonConfigWorkflow

TEST_CASE_1 = [os.path.join(os.path.dirname(__file__), "testing_data", "inference.json")]
@@ -35,6 +35,8 @@

TEST_CASE_3 = [os.path.join(os.path.dirname(__file__), "testing_data", "config_fl_train.json")]

TEST_CASE_4 = [os.path.join(os.path.dirname(__file__), "testing_data", "responsive_inference.json")]

TEST_CASE_NON_CONFIG_WRONG_LOG = [None, "logging.conf", "Cannot find the logging config file: logging.conf."]


@@ -45,7 +47,9 @@ def setUp(self):
self.expected_shape = (128, 128, 128)
test_image = np.random.rand(*self.expected_shape)
self.filename = os.path.join(self.data_dir, "image.nii")
self.filename1 = os.path.join(self.data_dir, "image1.nii")
nib.save(nib.Nifti1Image(test_image, np.eye(4)), self.filename)
nib.save(nib.Nifti1Image(test_image, np.eye(4)), self.filename1)

def tearDown(self):
shutil.rmtree(self.data_dir)
@@ -115,6 +119,35 @@ def test_inference_config(self, config_file):
self._test_inferer(inferer)
self.assertEqual(inferer.workflow_type, None)

@parameterized.expand([TEST_CASE_4])
def test_responsive_inference_config(self, config_file):
input_loader = LoadImaged(keys="image")
output_saver = SaveImaged(keys="pred", output_dir=self.data_dir, output_postfix="seg")

# test standard MONAI model-zoo config workflow
inferer = ConfigWorkflow(
workflow_type="infer",
config_file=config_file,
logging_file=os.path.join(os.path.dirname(__file__), "testing_data", "logging.conf"),
)
# FIXME: temporarily add the property for the test; it should be moved into a formal set of realtime-infer properties
inferer.add_property(name="dataflow", required=True, config_id="dataflow")

inferer.initialize()
inferer.dataflow.update(input_loader({"image": self.filename}))
inferer.run()
output_saver(inferer.dataflow)
self.assertTrue(os.path.exists(os.path.join(self.data_dir, "image", "image_seg.nii.gz")))

# bundle is instantiated and idle, just change the input for next inference
inferer.dataflow.clear()
inferer.dataflow.update(input_loader({"image": self.filename1}))
inferer.run()
output_saver(inferer.dataflow)
self.assertTrue(os.path.exists(os.path.join(self.data_dir, "image1", "image1_seg.nii.gz")))

inferer.finalize()

@parameterized.expand([TEST_CASE_3])
def test_train_config(self, config_file):
# test standard MONAI model-zoo config workflow
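The new test exercises the pattern this PR enables: one ConfigWorkflow is initialized once and then fed new inputs through the shared dataflow dict between run() calls. A hypothetical driver following the same steps outside a unit test; the config path, logging file, input file names and output_dir below are placeholders:

from monai.bundle import ConfigWorkflow
from monai.transforms import LoadImaged, SaveImaged

workflow = ConfigWorkflow(
    workflow_type="infer",
    config_file="responsive_inference.json",  # placeholder path
    logging_file="logging.conf",              # placeholder path
)
# expose the "dataflow" dict as a workflow property, as the test does
workflow.add_property(name="dataflow", required=True, config_id="dataflow")
workflow.initialize()

loader = LoadImaged(keys="image")
saver = SaveImaged(keys="pred", output_dir="./out", output_postfix="seg")

for path in ["image.nii", "image1.nii"]:      # placeholder inputs
    workflow.dataflow.clear()                 # keep the bundle alive, only swap the input
    workflow.dataflow.update(loader({"image": path}))
    workflow.run()
    saver(workflow.dataflow)

workflow.finalize()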
101 changes: 101 additions & 0 deletions tests/testing_data/responsive_inference.json
@@ -0,0 +1,101 @@
{
"imports": [
"$from collections import defaultdict"
],
"bundle_root": "will override",
"device": "$torch.device('cpu')",
"network_def": {
"_target_": "UNet",
"spatial_dims": 3,
"in_channels": 1,
"out_channels": 2,
"channels": [
2,
2,
4,
8,
4
],
"strides": [
2,
2,
2,
2
],
"num_res_units": 2,
"norm": "batch"
},
"network": "$@network_def.to(@device)",
"dataflow": "$defaultdict()",
"preprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "EnsureChannelFirstd",
"keys": "image"
},
{
"_target_": "ScaleIntensityd",
"keys": "image"
},
{
"_target_": "RandRotated",
"_disabled_": true,
"keys": "image"
}
]
},
"dataset": {
"_target_": "Dataset",
"data": [
"@dataflow"
],
"transform": "@preprocessing"
},
"dataloader": {
"_target_": "DataLoader",
"dataset": "@dataset",
"batch_size": 1,
"shuffle": false,
"num_workers": 0
},
"inferer": {
"_target_": "SlidingWindowInferer",
"roi_size": [
64,
64,
32
],
"sw_batch_size": 4,
"overlap": 0.25
},
"postprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "Activationsd",
"keys": "pred",
"softmax": true
},
{
"_target_": "AsDiscreted",
"keys": "pred",
"argmax": true
}
]
},
"evaluator": {
"_target_": "SupervisedEvaluator",
"device": "@device",
"val_data_loader": "@dataloader",
"network": "@network",
"inferer": "@inferer",
"postprocessing": "@postprocessing",
"amp": false,
"epoch_length": 1
},
"run": [
"[email protected]()",
"[email protected](@evaluator.state.output[0])"
]
}
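The last two entries of "run" are what make this config re-runnable: the first drives the evaluator over the one-item Dataset wrapping @dataflow, and the second copies the postprocessed result back into the same dict so the caller can read "pred" from it. Roughly, in Python terms, assuming evaluator and dataflow are the instantiated config components:

evaluator.run()                             # "[email protected]()"
dataflow.update(evaluator.state.output[0])  # "[email protected](@evaluator.state.output[0])"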