Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix input scaling in centered-instance model #2054

Merged
merged 9 commits into from
Dec 20, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def linkcode_resolve(domain, info):
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
'css/tabs.css',
"css/tabs.css",
]

# Custom sidebar templates, must be a dictionary that maps document names
Expand Down
2 changes: 2 additions & 0 deletions sleap/nn/data/pipelines.py
Original file line number Diff line number Diff line change
Expand Up @@ -775,6 +775,7 @@ def make_viz_pipeline(self, data_provider: Provider) -> Pipeline:
provider=data_provider,
)
pipeline += Normalizer.from_config(self.data_config.preprocessing)
pipeline += Resizer.from_config(self.data_config.preprocessing)
pipeline += InstanceCentroidFinder.from_config(
self.data_config.instance_cropping,
skeletons=self.data_config.labels.skeletons,
Expand Down Expand Up @@ -1250,6 +1251,7 @@ def make_viz_pipeline(self, data_provider: Provider) -> Pipeline:
provider=data_provider,
)
pipeline += Normalizer.from_config(self.data_config.preprocessing)
pipeline += Resizer.from_config(self.data_config.preprocessing)
pipeline += InstanceCentroidFinder.from_config(
self.data_config.instance_cropping,
skeletons=self.data_config.labels.skeletons,
Expand Down
64 changes: 57 additions & 7 deletions sleap/nn/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -727,11 +727,18 @@ class CentroidCropGroundTruth(tf.keras.layers.Layer):

Attributes:
crop_size: The length of the square box to extract around each centroid.
input_scale: Scale factor to resize the images by before they are
passed to the model. A value of 1.0 means no resizing.
"""

def __init__(self, crop_size: int):
def __init__(
    self,
    crop_size: int,
    input_scale: float = 1.0,
):
    """Initialize the ground-truth centroid cropping layer.

    Args:
        crop_size: The length of the square box to extract around each
            centroid.
        input_scale: Scale factor to resize the full images by before
            cropping. A value of 1.0 (the default) means no resizing.
    """
    super().__init__()
    self.crop_size = crop_size
    self.input_scale = input_scale

def call(self, example_gt: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
"""Return the ground truth instance crops.
Expand All @@ -758,6 +765,9 @@ def call(self, example_gt: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
"""
# Pull out data from example.
full_imgs = example_gt["image"]
if self.input_scale != 1.0:
full_imgs = sleap.nn.data.resizing.resize_image(full_imgs, self.input_scale)
example_gt["centroids"] *= self.input_scale
crop_sample_inds = example_gt["centroids"].value_rowids() # (n_peaks,)
n_peaks = tf.shape(crop_sample_inds)[0] # total number of peaks in the batch
gitttt-1234 marked this conversation as resolved.
Show resolved Hide resolved
centroid_points = example_gt["centroids"].flat_values # (n_peaks, 2)
Expand Down Expand Up @@ -927,11 +937,12 @@ def __init__(
self.ensure_grayscale = ensure_grayscale
self.ensure_float = ensure_float

def preprocess(self, imgs: tf.Tensor) -> tf.Tensor:
def preprocess(self, imgs: tf.Tensor, resize_img: bool = True) -> tf.Tensor:
"""Apply all preprocessing operations configured for this layer.

Args:
imgs: A batch of images as a tensor.
resize_img: Bool to indicate if the images should be resized.

Returns:
The input tensor after applying preprocessing operations. The tensor will
Expand All @@ -947,7 +958,7 @@ def preprocess(self, imgs: tf.Tensor) -> tf.Tensor:
if self.ensure_float:
imgs = sleap.nn.data.normalization.ensure_float(imgs)

if self.input_scale != 1.0:
if resize_img and self.input_scale != 1.0:
imgs = sleap.nn.data.resizing.resize_image(imgs, self.input_scale)

if self.pad_to_stride > 1:
Expand Down Expand Up @@ -1636,6 +1647,9 @@ class CentroidCrop(InferenceLayer):
crop_size: Integer scalar specifying the height/width of the centered crops.
input_scale: Float indicating if the images should be resized before being
passed to the model.
precrop_resize: Float indicating the factor by which the original images
(not images resized for centroid model) should be resized before cropping.
Note: this resize happens only after obtaining the predictions from the centroid model.
pad_to_stride: If not 1, input image will be padded to ensure that it is
divisible by this value (after scaling). This should be set to the max
stride of the model.
Expand Down Expand Up @@ -1676,6 +1690,7 @@ def __init__(
keras_model: tf.keras.Model,
crop_size: int,
input_scale: float = 1.0,
precrop_resize: float = 1.0,
pad_to_stride: int = 1,
output_stride: Optional[int] = None,
peak_threshold: float = 0.2,
Expand All @@ -1696,6 +1711,7 @@ def __init__(
)

self.crop_size = crop_size
self.precrop_resize = precrop_resize

self.confmaps_ind = confmaps_ind
self.offsets_ind = offsets_ind
Expand Down Expand Up @@ -1814,6 +1830,13 @@ def call(self, inputs):
# See: https://github.com/tensorflow/tensorflow/issues/6720
centroid_points = (centroid_points / self.input_scale) + 0.5

# resize full images
if self.precrop_resize != 1.0:
full_imgs = sleap.nn.data.resizing.resize_image(
full_imgs, self.precrop_resize
)
centroid_points *= self.precrop_resize

# Store crop offsets.
crop_offsets = centroid_points - (self.crop_size / 2)

Expand Down Expand Up @@ -1954,6 +1977,11 @@ class FindInstancePeaks(InferenceLayer):
centered instance confidence maps.
input_scale: Float indicating if the images should be resized before being
passed to the model.
resize_input_image: Bool indicating if the crops should be resized. If
`CentroidCropGroundTruth` or `CentroidCrop` is used along with `FindInstancePeaks`,
then the images are resized in the `CentroidCropGroundTruth` or `CentroidCrop`
before cropping and this is set to `False`. However, the output keypoints
are adjusted to the actual scale with the `input_scaling` argument.
output_stride: Output stride of the model, denoting the scale of the output
confidence maps relative to the images (after input scaling). This is used
for adjusting the peak coordinates to the image grid. This will be inferred
Expand Down Expand Up @@ -1984,6 +2012,7 @@ def __init__(
self,
keras_model: tf.keras.Model,
input_scale: float = 1.0,
resize_input_image: bool = True,
output_stride: Optional[int] = None,
peak_threshold: float = 0.2,
refinement: Optional[str] = "local",
Expand All @@ -1996,6 +2025,7 @@ def __init__(
super().__init__(
keras_model=keras_model, input_scale=input_scale, pad_to_stride=1, **kwargs
)
self.resize_input_image = resize_input_image
self.peak_threshold = peak_threshold
self.refinement = refinement
self.integral_patch_size = integral_patch_size
Expand Down Expand Up @@ -2093,7 +2123,7 @@ def call(
crop_sample_inds = tf.range(samples, dtype=tf.int32)

# Preprocess inputs (scaling, padding, colorspace, int to float).
crops = self.preprocess(crops)
crops = self.preprocess(crops, resize_img=self.resize_input_image)

# Network forward pass.
out = self.keras_model(crops)
Expand Down Expand Up @@ -2140,7 +2170,9 @@ def call(
if "crop_offsets" in inputs:
# Flatten (samples, ?, 2) -> (n_peaks, 2).
crop_offsets = inputs["crop_offsets"].merge_dims(0, 1)
peak_points = peak_points + tf.expand_dims(crop_offsets, axis=1)
peak_points = peak_points + (
tf.expand_dims(crop_offsets, axis=1) / self.input_scale
)

# Group peaks by sample (samples, ?, nodes, 2).
peaks = tf.RaggedTensor.from_value_rowids(
Expand Down Expand Up @@ -2343,7 +2375,7 @@ def _initialize_inference_model(self):

if use_gt_centroid:
centroid_crop_layer = CentroidCropGroundTruth(
crop_size=self.confmap_config.data.instance_cropping.crop_size
crop_size=self.confmap_config.data.instance_cropping.crop_size,
)
else:
if use_gt_confmap:
Expand All @@ -2354,6 +2386,7 @@ def _initialize_inference_model(self):
keras_model=self.centroid_model.keras_model,
crop_size=crop_size,
input_scale=self.centroid_config.data.preprocessing.input_scaling,
precrop_resize=1.0,
pad_to_stride=self.centroid_config.data.preprocessing.pad_to_stride,
output_stride=self.centroid_config.model.heads.centroid.output_stride,
peak_threshold=self.peak_threshold,
Expand All @@ -2375,7 +2408,14 @@ def _initialize_inference_model(self):
refinement="integral" if self.integral_refinement else "local",
integral_patch_size=self.integral_patch_size,
return_confmaps=False,
resize_input_image=False,
)
if use_gt_centroid:
centroid_crop_layer.input_scale = cfg.data.preprocessing.input_scaling
else:
centroid_crop_layer.precrop_resize = (
cfg.data.preprocessing.input_scaling
)

self.inference_model = TopDownInferenceModel(
centroid_crop=centroid_crop_layer, instance_peaks=instance_peaks_layer
Expand Down Expand Up @@ -3831,6 +3871,11 @@ class TopDownMultiClassFindPeaks(InferenceLayer):
centered instance confidence maps and classification.
input_scale: Float indicating if the images should be resized before being
passed to the model.
resize_input_image: Bool indicating if the crops should be resized. If
`CentroidCropGroundTruth` is used along with `FindInstancePeaks`, then the
images are resized in the `CentroidCropGroundTruth` and this is set to `False`.
However, the output keypoints are adjusted to the actual scale with the
`input_scaling` argument.
output_stride: Output stride of the model, denoting the scale of the output
confidence maps relative to the images (after input scaling). This is used
for adjusting the peak coordinates to the image grid. This will be inferred
Expand Down Expand Up @@ -3872,6 +3917,7 @@ def __init__(
self,
keras_model: tf.keras.Model,
input_scale: float = 1.0,
resize_input_image: bool = True,
output_stride: Optional[int] = None,
peak_threshold: float = 0.2,
refinement: Optional[str] = "local",
Expand All @@ -3887,6 +3933,7 @@ def __init__(
super().__init__(
keras_model=keras_model, input_scale=input_scale, pad_to_stride=1, **kwargs
)
self.resize_input_image = resize_input_image
self.peak_threshold = peak_threshold
self.refinement = refinement
self.integral_patch_size = integral_patch_size
Expand Down Expand Up @@ -4004,7 +4051,7 @@ def call(
crop_sample_inds = tf.range(samples, dtype=tf.int32)

# Preprocess inputs (scaling, padding, colorspace, int to float).
crops = self.preprocess(crops)
crops = self.preprocess(crops, resize_img=self.resize_input_image)

# Network forward pass.
out = self.keras_model(crops)
Expand Down Expand Up @@ -4253,7 +4300,10 @@ def _initialize_inference_model(self):
refinement="integral" if self.integral_refinement else "local",
integral_patch_size=self.integral_patch_size,
return_confmaps=False,
resize_input_image=False,
)
if use_gt_centroid:
centroid_crop_layer.input_scale = cfg.data.preprocessing.input_scaling

self.inference_model = TopDownMultiClassInferenceModel(
centroid_crop=centroid_crop_layer, instance_peaks=instance_peaks_layer
Expand Down
6 changes: 4 additions & 2 deletions sleap/nn/training.py
Original file line number Diff line number Diff line change
Expand Up @@ -1315,10 +1315,11 @@ def _setup_visualization(self):
# Create an instance peak finding layer.
find_peaks = FindInstancePeaks(
keras_model=self.keras_model,
input_scale=self.config.data.preprocessing.input_scaling,
input_scale=1.0,
peak_threshold=0.2,
refinement="local",
return_confmaps=True,
resize_input_image=False,
)

def visualize_example(example):
Expand Down Expand Up @@ -1755,10 +1756,11 @@ def _setup_visualization(self):
# Create an instance peak finding layer.
find_peaks = FindInstancePeaks(
keras_model=self.keras_model,
input_scale=self.config.data.preprocessing.input_scaling,
input_scale=1.0,
peak_threshold=0.2,
refinement="local",
return_confmaps=True,
resize_input_image=False,
)

def visualize_example(example):
Expand Down
Loading