
Commit

add tests
eaidova committed Dec 26, 2024
1 parent 58ada07 commit 4be7cd2
Showing 2 changed files with 32 additions and 11 deletions.
42 changes: 31 additions & 11 deletions tests/openvino/test_diffusion.py
@@ -667,13 +667,14 @@ class OVPipelineForInpaintingTest(unittest.TestCase):
     if is_transformers_version(">=", "4.40.0"):
         SUPPORTED_ARCHITECTURES.append("stable-diffusion-3")
         SUPPORTED_ARCHITECTURES.append("flux")
+        SUPPORTED_ARCHITECTURES.append("flux-fill")

     AUTOMODEL_CLASS = AutoPipelineForInpainting
     OVMODEL_CLASS = OVPipelineForInpainting

     TASK = "inpainting"

-    def generate_inputs(self, height=128, width=128, batch_size=1, channel=3, input_type="pil"):
+    def generate_inputs(self, height=128, width=128, batch_size=1, channel=3, input_type="pil", model_arch=""):
         inputs = _generate_prompts(batch_size=batch_size)

         inputs["image"] = _generate_images(
@@ -683,7 +684,8 @@ def generate_inputs(self, height=128, width=128, batch_size=1, channel=3, input_
             height=height, width=width, batch_size=batch_size, channel=1, input_type=input_type
         )

-        inputs["strength"] = 0.75
+        if model_arch != "flux-fill":
+            inputs["strength"] = 0.75
         inputs["height"] = height
         inputs["width"] = width

@@ -699,7 +701,12 @@ def test_load_vanilla_model_which_is_not_supported(self):
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @require_diffusers
     def test_ov_pipeline_class_dispatch(self, model_arch: str):
-        auto_pipeline = self.AUTOMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
+        if model_arch != "flux-fill":
+            auto_pipeline = self.AUTOMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
+        else:
+            from diffusers import FluxFillPipeline
+
+            auto_pipeline = FluxFillPipeline.from_pretrained(MODEL_NAMES[model_arch])
         ov_pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])

         self.assertEqual(ov_pipeline.auto_model_class, auto_pipeline.__class__)
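Note: the flux-fill branch bypasses AutoPipelineForInpainting and loads the diffusers reference through FluxFillPipeline directly, presumably because the inpainting auto-mapping does not cover the Flux fill variant. A minimal standalone sketch of that dispatch, using only the class names visible in the diff and the tiny checkpoint registered in utils_tests.py below:

from diffusers import AutoPipelineForInpainting, FluxFillPipeline

def load_reference_pipeline(model_arch: str, model_id: str):
    # Assumption: the Flux fill variant is not resolvable via AutoPipelineForInpainting,
    # so it is loaded through its dedicated pipeline class; every other architecture
    # goes through the auto class, as in the test above.
    if model_arch == "flux-fill":
        return FluxFillPipeline.from_pretrained(model_id)
    return AutoPipelineForInpainting.from_pretrained(model_id)

reference = load_reference_pipeline("flux-fill", "katuni4ka/tiny-random-flux-fill")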
@@ -713,7 +720,9 @@ def test_num_images_per_prompt(self, model_arch: str):
             for height in [64, 128]:
                 for width in [64, 128]:
                     for num_images_per_prompt in [1, 3]:
-                        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+                        inputs = self.generate_inputs(
+                            height=height, width=width, batch_size=batch_size, model_arch=model_arch
+                        )
                         outputs = pipeline(**inputs, num_images_per_prompt=num_images_per_prompt).images
                         self.assertEqual(outputs.shape, (batch_size * num_images_per_prompt, height, width, 3))

@@ -752,7 +761,9 @@ def test_shape(self, model_arch: str):
         height, width, batch_size = 128, 64, 1

         for input_type in ["pil", "np", "pt"]:
-            inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size, input_type=input_type)
+            inputs = self.generate_inputs(
+                height=height, width=width, batch_size=batch_size, input_type=input_type, model_arch=model_arch
+            )

             for output_type in ["pil", "np", "pt", "latent"]:
                 inputs["output_type"] = output_type
@@ -764,7 +775,7 @@ def test_shape(self, model_arch: str):
                 elif output_type == "pt":
                     self.assertEqual(outputs.shape, (batch_size, 3, height, width))
                 else:
-                    if model_arch != "flux":
+                    if not model_arch.startswith("flux"):
                         out_channels = (
                             pipeline.unet.config.out_channels
                             if pipeline.unet is not None
@@ -782,17 +793,26 @@ def test_shape(self, model_arch: str):
                     else:
                         packed_height = height // pipeline.vae_scale_factor // 2
                         packed_width = width // pipeline.vae_scale_factor // 2
-                        channels = pipeline.transformer.config.in_channels
+                        channels = (
+                            pipeline.transformer.config.in_channels
+                            if model_arch != "flux-fill"
+                            else pipeline.transformer.out_channels
+                        )
                         self.assertEqual(outputs.shape, (batch_size, packed_height * packed_width, channels))

     @parameterized.expand(SUPPORTED_ARCHITECTURES)
     @require_diffusers
     def test_compare_to_diffusers_pipeline(self, model_arch: str):
         ov_pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
-        diffusers_pipeline = self.AUTOMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
+        if model_arch != "flux-fill":
+            diffusers_pipeline = self.AUTOMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])
+        else:
+            from diffusers import FluxFillPipeline
+
+            diffusers_pipeline = FluxFillPipeline.from_pretrained(MODEL_NAMES[model_arch])

         height, width, batch_size = 64, 64, 1
-        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size, model_arch=model_arch)

         for output_type in ["latent", "np", "pt"]:
             inputs["output_type"] = output_type
@@ -804,7 +824,7 @@ def test_compare_to_diffusers_pipeline(self, model_arch: str):

         # test generation when input resolution is not divisible by 64
         height, width, batch_size = 96, 96, 1
-        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size, model_arch=model_arch)

         for output_type in ["latent", "np", "pt"]:
             inputs["output_type"] = output_type
@@ -820,7 +840,7 @@ def test_image_reproducibility(self, model_arch: str):
         pipeline = self.OVMODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch])

         height, width, batch_size = 64, 64, 1
-        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
+        inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size, model_arch=model_arch)

         for generator_framework in ["np", "pt"]:
             ov_outputs_1 = pipeline(**inputs, generator=get_generator(generator_framework, SEED))
1 change: 1 addition & 0 deletions tests/openvino/utils_tests.py
@@ -66,6 +66,7 @@
     "falcon-40b": "katuni4ka/tiny-random-falcon-40b",
     "flaubert": "hf-internal-testing/tiny-random-flaubert",
     "flux": "katuni4ka/tiny-random-flux",
+    "flux-fill": "katuni4ka/tiny-random-flux-fill",
     "gpt_bigcode": "hf-internal-testing/tiny-random-GPTBigCodeModel",
     "gpt2": "hf-internal-testing/tiny-random-gpt2",
     "gpt_neo": "hf-internal-testing/tiny-random-GPTNeoModel",