diff --git a/src/diffusers/loaders/lora_pipeline.py b/src/diffusers/loaders/lora_pipeline.py
index 351295e938ff..5264841c31e3 100644
--- a/src/diffusers/loaders/lora_pipeline.py
+++ b/src/diffusers/loaders/lora_pipeline.py
@@ -2468,6 +2468,9 @@ def _maybe_expand_lora_state_dict(cls, transformer, lora_state_dict):
             base_param_name = (
                 f"{k.replace(prefix, '')}.base_layer.weight" if is_peft_loaded else f"{k.replace(prefix, '')}.weight"
             )
+            # This means the corresponding layer doesn't have any LoRA.
+            if base_param_name not in transformer_state_dict:
+                continue
             base_weight_param = transformer_state_dict[base_param_name]
             lora_A_param = lora_state_dict[f"{prefix}{k}.lora_A.weight"]
 
diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py
index 0861160de6aa..b7352a4c2ad8 100644
--- a/tests/lora/test_lora_layers_flux.py
+++ b/tests/lora/test_lora_layers_flux.py
@@ -12,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import copy
 import gc
 import os
 import sys
@@ -162,6 +163,56 @@ def test_with_alpha_in_state_dict(self):
         )
         self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3))
 
+    def test_lora_expansion_works_for_absent_keys(self):
+        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
+
+        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        self.assertTrue(output_no_lora.shape == self.output_shape)
+
+        # Modify the config to have a layer which won't be present in the second LoRA we will load.
+        modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config)
+        modified_denoiser_lora_config.target_modules.add("x_embedder")
+
+        pipe.transformer.add_adapter(modified_denoiser_lora_config)
+        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
+
+        images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        self.assertFalse(
+            np.allclose(images_lora, output_no_lora, atol=1e-3, rtol=1e-3),
+            "LoRA should lead to different results.",
+        )
+
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
+            self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
+
+            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
+            pipe.unload_lora_weights()
+            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
+
+            # Modify the state dict to exclude the "x_embedder" LoRA params so that
+            # the LoRA we load next is missing keys for that layer.
+            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
+            lora_state_dict_without_xembedder = {k: v for k, v in lora_state_dict.items() if "x_embedder" not in k}
+
+        pipe.unload_lora_weights()
+        pipe.load_lora_weights(lora_state_dict_without_xembedder)
+        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
+        images_lora_with_absent_keys = pipe(**inputs, generator=torch.manual_seed(0)).images
+
+        self.assertFalse(
+            np.allclose(images_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3),
+            "Different LoRAs should lead to different results.",
+        )
+        self.assertFalse(
+            np.allclose(output_no_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3),
+            "LoRA should lead to different results.",
+        )
+
     @unittest.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass
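
For context, a minimal sketch of the situation the new guard in `_maybe_expand_lora_state_dict` handles: when a loaded LoRA's state dict has no entry matching a transformer base weight, the old lookup `transformer_state_dict[base_param_name]` would raise a KeyError, whereas the guard now skips that module. The model ID and LoRA path below are illustrative placeholders, not part of this diff.

```python
# Illustrative only; assumes local access to the Flux checkpoint and a LoRA file
# that does not contain "x_embedder" entries (the path is a placeholder).
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# A LoRA covering only a subset of the transformer's layers should now load
# cleanly: modules without a matching base weight are skipped during expansion.
pipe.load_lora_weights("path/to/lora_without_x_embedder.safetensors")
```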