Rename ConcatenatedLoRALayer to MergedLayerPatch. And other minor cleanup.
RyanJDick committed Jan 24, 2025
1 parent 31cea61 commit 5085a8c
Showing 4 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion invokeai/app/invocations/flux_text_encoder.py
@@ -98,7 +98,7 @@ def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
             raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")

         # Apply LoRA models to the T5 encoder.
-        # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
+        # Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
         exit_stack.enter_context(
             LayerPatcher.apply_smart_model_patches(
                 model=t5_text_encoder,
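Aside: the updated comment encodes an ordering decision — move the encoder to its execution device first, then apply the LoRA patches, so the weight edits happen on-device. Below is a minimal, self-contained sketch of that pattern; apply_lora_patches is a hypothetical stand-in for LayerPatcher.apply_smart_model_patches, whose full argument list is truncated in the diff above.

from contextlib import ExitStack, contextmanager

import torch


@contextmanager
def apply_lora_patches(model: torch.nn.Module):
    """Hypothetical stand-in: patch the model's weights on enter, restore them on exit."""
    try:
        # (The real patcher edits the weights in-place here; by this point they
        # already live on the target device, so the edits are fast.)
        yield model
    finally:
        # (The real patcher restores the original weights here.)
        pass


def run_patched(model: torch.nn.Module, device: torch.device) -> None:
    # Move the model to its execution device *before* patching...
    model.to(device)

    with ExitStack() as exit_stack:
        # ...then enter the patch context, so the LoRA deltas are applied to
        # on-device tensors instead of being patched on the CPU and copied over.
        exit_stack.enter_context(apply_lora_patches(model))
        # Run the encoder here while the patches are active; they are reverted
        # when the ExitStack unwinds.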
invokeai/backend/patches/layers/concatenated_lora_layer.py → invokeai/backend/patches/layers/merged_layer_patch.py
@@ -13,8 +13,8 @@ class Range:
     end: int


-class ConcatenatedLoRALayer(BaseLayerPatch):
-    """A patch layer that is composed of multiple sub-layers concatenated together.
+class MergedLayerPatch(BaseLayerPatch):
+    """A patch layer that is composed of multiple sub-layers merged together.

     This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
     Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
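For context on the renamed class: a minimal sketch, with illustrative dimensions and bias=None as assumptions, of how a MergedLayerPatch is assembled for a fused Q/K/V projection — one LoRA sub-layer per matrix, each paired with a Range marking which slice of the merged output dimension it covers. The constructor calls mirror the conversion and test code in the hunks below.

import torch

from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range

# Illustrative sizes for a fused Q/K/V projection (not taken from the commit).
in_features = 32
rank = 4
qkv_out_features = [16, 16, 16]  # output rows contributed by Q, K, and V

sub_layers: list[LoRALayer] = []
sub_layer_ranges: list[Range] = []
dim_0_offset = 0
for out_features in qkv_out_features:
    up = torch.randn(out_features, rank)
    down = torch.randn(rank, in_features)
    # bias=None is an assumption here; the tests below also exercise a bias term.
    sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=None))
    # Each Range records where this sub-layer's rows land in the merged dim-0.
    sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + out_features))
    dim_0_offset += out_features

merged_layer_patch = MergedLayerPatch(sub_layers, sub_layer_ranges)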
@@ -3,7 +3,7 @@
 import torch

 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -113,7 +113,7 @@ def add_qkv_lora_layer_if_present(

             dim_0_offset += src_weight_shape[0]

-        layers[dst_qkv_key] = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+        layers[dst_qkv_key] = MergedLayerPatch(sub_layers, sub_layer_ranges)

     # time_text_embed.timestep_embedder -> time_in.
     add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
@@ -13,10 +13,10 @@
 )
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
 from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
 from invokeai.backend.patches.layers.lora_layer import LoRALayer
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
 from tests.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.test_custom_invoke_linear_8_bit_lt import (
     build_linear_8bit_lt_layer,
@@ -328,7 +328,7 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
     elif layer_type == "concatenated_lora":
         sub_layer_out_features = [16, 16, 32]

-        # Create a ConcatenatedLoRA layer.
+        # Create a MergedLayerPatch.
         sub_layers: list[LoRALayer] = []
         sub_layer_ranges: list[Range] = []
         dim_0_offset = 0
@@ -339,10 +339,10 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
             sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=bias))
             sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + out_features))
             dim_0_offset += out_features
-        concatenated_lora_layer = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+        merged_layer_patch = MergedLayerPatch(sub_layers, sub_layer_ranges)

         input = torch.randn(1, in_features)
-        return ([(concatenated_lora_layer, 0.7)], input)
+        return ([(merged_layer_patch, 0.7)], input)
     elif layer_type == "flux_control_lora":
         # Create a FluxControlLoRALayer.
         patched_in_features = 40
