From 6414d2d372d3efc352debe1f88f2670267ca8dbd Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sat, 29 Jun 2024 11:33:37 +0530
Subject: [PATCH] feat: Update Tile Pre-Processor to support more modes

---
 .../controlnet_image_processors.py            | 127 +++++---
 .../fast_guided_filter/fast_guided_filter.py  | 281 ++++++++++++++++++
 .../frontend/web/src/services/api/schema.ts   | 251 ++++++++--------
 3 files changed, 493 insertions(+), 166 deletions(-)
 create mode 100644 invokeai/backend/image_util/fast_guided_filter/fast_guided_filter.py

diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py
index c0b332f27b6..10d3e3df476 100644
--- a/invokeai/app/invocations/controlnet_image_processors.py
+++ b/invokeai/app/invocations/controlnet_image_processors.py
@@ -1,51 +1,47 @@
 # Invocations for ControlNet image preprocessors
 # initial implementation by Gregg Helt, 2023
 # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
+import random
 from builtins import bool, float
 from pathlib import Path
-from typing import Dict, List, Literal, Union
+from typing import Any, Dict, List, Literal, Union
 
 import cv2
 import numpy as np
-from controlnet_aux import (
-    ContentShuffleDetector,
-    LeresDetector,
-    MediapipeFaceDetector,
-    MidasDetector,
-    MLSDdetector,
-    NormalBaeDetector,
-    PidiNetDetector,
-    SamDetector,
-    ZoeDetector,
-)
+from controlnet_aux import (ContentShuffleDetector, LeresDetector,
+                            MediapipeFaceDetector, MidasDetector, MLSDdetector,
+                            NormalBaeDetector, PidiNetDetector, SamDetector,
+                            ZoeDetector)
 from controlnet_aux.util import HWC3, ade_palette
 from PIL import Image
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from invokeai.app.invocations.fields import (
-    FieldDescriptions,
-    ImageField,
-    InputField,
-    OutputField,
-    UIType,
-    WithBoard,
-    WithMetadata,
-)
+from invokeai.app.invocations.fields import (FieldDescriptions, ImageField,
+                                             InputField, OutputField, UIType,
+                                             WithBoard, WithMetadata)
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
-from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
+from invokeai.app.invocations.util import (validate_begin_end_step,
+                                           validate_weights)
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
+from invokeai.app.util.controlnet_utils import (CONTROLNET_MODE_VALUES,
+                                                CONTROLNET_RESIZE_VALUES,
+                                                heuristic_resize)
 from invokeai.backend.image_util.canny import get_canny_edges
-from invokeai.backend.image_util.depth_anything import DEPTH_ANYTHING_MODELS, DepthAnythingDetector
+from invokeai.backend.image_util.depth_anything import (DEPTH_ANYTHING_MODELS,
+                                                        DepthAnythingDetector)
-from invokeai.backend.image_util.dw_openpose import DWPOSE_MODELS, DWOpenposeDetector
+from invokeai.backend.image_util.dw_openpose import (DWPOSE_MODELS,
+                                                     DWOpenposeDetector)
+from invokeai.backend.image_util.fast_guided_filter.fast_guided_filter import \
+    FastGuidedFilter
 from invokeai.backend.image_util.hed import HEDProcessor
 from invokeai.backend.image_util.lineart import LineartProcessor
 from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np
 from invokeai.backend.util.devices import TorchDevice
 
-from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
+from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
+                             Classification, invocation, invocation_output)
 
 
 class ControlField(BaseModel):
@@ -483,30 +479,73 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
 
     # res: int = InputField(default=512, ge=0, le=1024, description="The pixel resolution for each tile")
     down_sampling_rate: float = InputField(default=1.0, ge=1.0, le=8.0, description="Down sampling rate")
+    mode: Literal["regular", "blur", "var", "super"] = InputField(
+        default="regular", description="The tile preprocessing mode to use"
+    )
+
+    def apply_gaussian_blur(self, image_np: np.ndarray[Any, Any], ksize: int = 5, sigmaX: float = 1.0):
+        if ksize % 2 == 0:
+            ksize += 1  # ksize must be odd
+        blurred_image = cv2.GaussianBlur(image_np, (ksize, ksize), sigmaX=sigmaX)
+        return blurred_image
+
+    def apply_guided_filter(self, image_np: np.ndarray[Any, Any], radius: int, eps: float, scale: int):
+        guided_filter = FastGuidedFilter(image_np, radius, eps, scale)  # avoid shadowing the filter() builtin
+        return guided_filter.filter(image_np)
+
+    # based on https://huggingface.co/TTPlanet/TTPLanet_SDXL_Controlnet_Tile_Realistic
+    def tile_resample(self, np_img: np.ndarray[Any, Any]):
+        height, width, _ = np_img.shape
 
-    # tile_resample copied from sd-webui-controlnet/scripts/processor.py
-    def tile_resample(
-        self,
-        np_img: np.ndarray,
-        res=512,  # never used?
-        down_sampling_rate=1.0,
-    ):
-        np_img = HWC3(np_img)
-        if down_sampling_rate < 1.1:
+        if self.mode == "regular":
+            np_img = HWC3(np_img)
+            if self.down_sampling_rate < 1.1:
+                return np_img
+
+            height = int(float(height) / float(self.down_sampling_rate))
+            width = int(float(width) / float(self.down_sampling_rate))
+            np_img = cv2.resize(np_img, (width, height), interpolation=cv2.INTER_AREA)
             return np_img
-        H, W, C = np_img.shape
-        H = int(float(H) / float(down_sampling_rate))
-        W = int(float(W) / float(down_sampling_rate))
-        np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
+
+        ratio = np.sqrt(1024.0 * 1024.0 / (width * height))
+
+        resize_w, resize_h = int(width * ratio), int(height * ratio)
+
+        if self.mode == "super":
+            resize_w, resize_h = int(width * ratio) // 48 * 48, int(height * ratio) // 48 * 48
+
+        np_img = cv2.resize(np_img, (resize_w, resize_h))
+
+        if self.mode == "blur":
+            blur_strength = random.sample([i / 10.0 for i in range(10, 201, 2)], k=1)[0]
+            radius = random.sample([i for i in range(1, 40, 2)], k=1)[0]
+            eps = random.sample([i / 1000.0 for i in range(1, 101, 2)], k=1)[0]
+            scale_factor = random.sample([i / 10.0 for i in range(10, 181, 5)], k=1)[0]
+
+            if random.random() > 0.5:
+                np_img = self.apply_gaussian_blur(np_img, ksize=int(blur_strength), sigmaX=blur_strength / 2)
+
+            if random.random() > 0.5:
+                np_img = self.apply_guided_filter(np_img, radius, eps, int(scale_factor))
+
+            np_img = cv2.resize(
+                np_img, (int(resize_w / scale_factor), int(resize_h / scale_factor)), interpolation=cv2.INTER_AREA
+            )
+            np_img = cv2.resize(np_img, (resize_w, resize_h), interpolation=cv2.INTER_CUBIC)
+
+        if self.mode == "var":
+            pass  # "var" currently falls through to the shared ~1 MP resize above
+
+        if self.mode == "super":
+            pass  # the multiple-of-48 sizing for "super" is already applied above
+
+        np_img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
+
+        return np_img
 
     def run_processor(self, image: Image.Image) -> Image.Image:
        np_img = np.array(image, dtype=np.uint8)
-        processed_np_image = self.tile_resample(
-            np_img,
-            # res=self.tile_size,
-            down_sampling_rate=self.down_sampling_rate,
-        )
+        processed_np_image = self.tile_resample(np_img)
         processed_image = Image.fromarray(processed_np_image)
         return processed_image
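The new "blur" mode intentionally degrades the control image so that a tile ControlNet (for example the TTPlanet SDXL tile model linked in the code comment) has detail to re-synthesize: it samples a blur strength, guided-filter radius/epsilon, and scale factor at random, optionally applies a Gaussian blur and/or a fast guided filter, and finishes with a lossy down/up resize round trip. The sketch below replays that branch outside the invocation, with fixed values standing in for the random sampling; the input path is a placeholder.

```python
import cv2
import numpy as np

# Fixed stand-ins for the parameters tile_resample samples at random:
BLUR_STRENGTH = 5.0  # sampled from [1.0, 20.0] in the node
SCALE_FACTOR = 4.0   # sampled from [1.0, 18.0] in the node

img = cv2.cvtColor(cv2.imread("input.png"), cv2.COLOR_BGR2RGB)  # placeholder path
height, width, _ = img.shape

# Normalize the working resolution to roughly one megapixel, as tile_resample does.
ratio = np.sqrt(1024.0 * 1024.0 / (width * height))
resize_w, resize_h = int(width * ratio), int(height * ratio)
img = cv2.resize(img, (resize_w, resize_h))

# Degrade: odd-kernel Gaussian blur, then an INTER_AREA/INTER_CUBIC resize round trip.
ksize = int(BLUR_STRENGTH) | 1  # kernel size must be odd
img = cv2.GaussianBlur(img, (ksize, ksize), sigmaX=BLUR_STRENGTH / 2)
img = cv2.resize(img, (int(resize_w / SCALE_FACTOR), int(resize_h / SCALE_FACTOR)), interpolation=cv2.INTER_AREA)
img = cv2.resize(img, (resize_w, resize_h), interpolation=cv2.INTER_CUBIC)
```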
diff --git a/invokeai/backend/image_util/fast_guided_filter/fast_guided_filter.py b/invokeai/backend/image_util/fast_guided_filter/fast_guided_filter.py
new file mode 100644
index 00000000000..e59a7c90b75
--- /dev/null
+++ b/invokeai/backend/image_util/fast_guided_filter/fast_guided_filter.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+## @package guided_filter.core.filters
+#
+#  Implementation of the guided filter.
+#  * GuidedFilter: original guided filter.
+#  * FastGuidedFilter: fast version of the guided filter.
+#  @author tody
+#  @date 2015/08/26
+
+import cv2
+import numpy as np
+
+
+## Convert image into float32 type.
+def to32F(img):
+    if img.dtype == np.float32:
+        return img
+    return (1.0 / 255.0) * np.float32(img)
+
+
+## Convert image into uint8 type.
+def to8U(img):
+    if img.dtype == np.uint8:
+        return img
+    return np.clip(np.uint8(255.0 * img), 0, 255)
+
+
+## Return True if the input image is grayscale.
+def _isGray(I):
+    return len(I.shape) == 2
+
+
+## Return a downsampled image.
+#  @param scale A (w/s, h/s) image will be created.
+#  @param shape I.shape[:2] = (h, w); numpy-friendly size parameter.
+def _downSample(I, scale=4, shape=None):
+    if shape is not None:
+        h, w = shape
+        return cv2.resize(I, (w, h), interpolation=cv2.INTER_NEAREST)
+
+    h, w = I.shape[:2]
+    return cv2.resize(I, (int(w / scale), int(h / scale)), interpolation=cv2.INTER_NEAREST)
+
+
+## Return an upsampled image.
+#  @param scale A (w*s, h*s) image will be created.
+#  @param shape I.shape[:2] = (h, w); numpy-friendly size parameter.
+def _upSample(I, scale=2, shape=None):
+    if shape is not None:
+        h, w = shape
+        return cv2.resize(I, (w, h), interpolation=cv2.INTER_LINEAR)
+
+    h, w = I.shape[:2]
+    return cv2.resize(I, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_LINEAR)
+
+
+## Fast guided filter.
+class FastGuidedFilter:
+    ## Constructor.
+    #  @param I Input guidance image. Color or gray.
+    #  @param radius Radius of the guided filter.
+    #  @param epsilon Regularization term of the guided filter.
+    #  @param scale Downsampling scale.
+    def __init__(self, I, radius=5, epsilon=0.4, scale=4):
+        I_32F = to32F(I)
+        self._I = I_32F
+        h, w = I.shape[:2]
+
+        I_sub = _downSample(I_32F, scale)
+
+        self._I_sub = I_sub
+        radius = int(radius / scale)
+
+        if _isGray(I):
+            self._guided_filter = GuidedFilterGray(I_sub, radius, epsilon)
+        else:
+            self._guided_filter = GuidedFilterColor(I_sub, radius, epsilon)
+
+    ## Apply the filter to the input image.
+    #  @param p Input image to be filtered.
+    def filter(self, p):
+        p_32F = to32F(p)
+        shape_original = p.shape[:2]
+
+        p_sub = _downSample(p_32F, shape=self._I_sub.shape[:2])
+
+        if _isGray(p_sub):
+            return self._filterGray(p_sub, shape_original)
+
+        cs = p.shape[2]
+        q = np.array(p_32F)
+
+        for ci in range(cs):
+            q[:, :, ci] = self._filterGray(p_sub[:, :, ci], shape_original)
+        return to8U(q)
+
+    def _filterGray(self, p_sub, shape_original):
+        ab_sub = self._guided_filter._computeCoefficients(p_sub)
+        ab = [_upSample(abi, shape=shape_original) for abi in ab_sub]
+        return self._guided_filter._computeOutput(ab, self._I)
+
+
+## Guided filter.
+class GuidedFilter:
+    ## Constructor.
+    #  @param I Input guidance image. Color or gray.
+    #  @param radius Radius of the guided filter.
+    #  @param epsilon Regularization term of the guided filter.
+    def __init__(self, I, radius=5, epsilon=0.4):
+        I_32F = to32F(I)
+
+        if _isGray(I):
+            self._guided_filter = GuidedFilterGray(I_32F, radius, epsilon)
+        else:
+            self._guided_filter = GuidedFilterColor(I_32F, radius, epsilon)
+
+    ## Apply the filter to the input image.
+    #  @param p Input image to be filtered.
+    def filter(self, p):
+        return to8U(self._guided_filter.filter(p))
+
+
+## Common parts of the guided filter.
+#
+#  Used by GuidedFilterGray and GuidedFilterColor: building on their
+#  _computeCoefficients and _computeOutput methods,
+#  GuidedFilterCommon.filter computes the filtered image for color and gray inputs.
+class GuidedFilterCommon:
+    def __init__(self, guided_filter):
+        self._guided_filter = guided_filter
+
+    ## Apply the filter to the input image.
+    #  @param p Input image to be filtered.
+    def filter(self, p):
+        p_32F = to32F(p)
+        if _isGray(p_32F):
+            return self._filterGray(p_32F)
+
+        cs = p.shape[2]
+        q = np.array(p_32F)
+
+        for ci in range(cs):
+            q[:, :, ci] = self._filterGray(p_32F[:, :, ci])
+        return q
+
+    def _filterGray(self, p):
+        ab = self._guided_filter._computeCoefficients(p)
+        return self._guided_filter._computeOutput(ab, self._guided_filter._I)
+
+
+## Guided filter for a gray guidance image.
+class GuidedFilterGray:
+    #  @param I Input gray guidance image.
+    #  @param radius Radius of the guided filter.
+    #  @param epsilon Regularization term of the guided filter.
+    def __init__(self, I, radius=5, epsilon=0.4):
+        self._radius = 2 * radius + 1
+        self._epsilon = epsilon
+        self._I = to32F(I)
+        self._initFilter()
+        self._filter_common = GuidedFilterCommon(self)
+
+    ## Apply the filter to the input image.
+    #  @param p Input image to be filtered.
+    def filter(self, p):
+        return self._filter_common.filter(p)
+
+    def _initFilter(self):
+        I = self._I
+        r = self._radius
+        self._I_mean = cv2.blur(I, (r, r))
+        I_mean_sq = cv2.blur(I**2, (r, r))
+        self._I_var = I_mean_sq - self._I_mean**2
+
+    def _computeCoefficients(self, p):
+        r = self._radius
+        p_mean = cv2.blur(p, (r, r))
+        p_cov = p_mean - self._I_mean * p_mean
+        a = p_cov / (self._I_var + self._epsilon)
+        b = p_mean - a * self._I_mean
+        a_mean = cv2.blur(a, (r, r))
+        b_mean = cv2.blur(b, (r, r))
+        return a_mean, b_mean
+
+    def _computeOutput(self, ab, I):
+        a_mean, b_mean = ab
+        return a_mean * I + b_mean
+
+
+## Guided filter for a color guidance image.
+class GuidedFilterColor:
+    #  @param I Input color guidance image.
+    #  @param radius Radius of the guided filter.
+    #  @param epsilon Regularization term of the guided filter.
+    def __init__(self, I, radius=5, epsilon=0.2):
+        self._radius = 2 * radius + 1
+        self._epsilon = epsilon
+        self._I = to32F(I)
+        self._initFilter()
+        self._filter_common = GuidedFilterCommon(self)
+
+    ## Apply the filter to the input image.
+    #  @param p Input image to be filtered.
+    def filter(self, p):
+        return self._filter_common.filter(p)
+
+    def _initFilter(self):
+        I = self._I
+        r = self._radius
+        eps = self._epsilon
+
+        Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]
+
+        self._Ir_mean = cv2.blur(Ir, (r, r))
+        self._Ig_mean = cv2.blur(Ig, (r, r))
+        self._Ib_mean = cv2.blur(Ib, (r, r))
+
+        Irr_var = cv2.blur(Ir**2, (r, r)) - self._Ir_mean**2 + eps
+        Irg_var = cv2.blur(Ir * Ig, (r, r)) - self._Ir_mean * self._Ig_mean
+        Irb_var = cv2.blur(Ir * Ib, (r, r)) - self._Ir_mean * self._Ib_mean
+        Igg_var = cv2.blur(Ig * Ig, (r, r)) - self._Ig_mean * self._Ig_mean + eps
+        Igb_var = cv2.blur(Ig * Ib, (r, r)) - self._Ig_mean * self._Ib_mean
+        Ibb_var = cv2.blur(Ib * Ib, (r, r)) - self._Ib_mean * self._Ib_mean + eps
+
+        Irr_inv = Igg_var * Ibb_var - Igb_var * Igb_var
+        Irg_inv = Igb_var * Irb_var - Irg_var * Ibb_var
+        Irb_inv = Irg_var * Igb_var - Igg_var * Irb_var
+        Igg_inv = Irr_var * Ibb_var - Irb_var * Irb_var
+        Igb_inv = Irb_var * Irg_var - Irr_var * Igb_var
+        Ibb_inv = Irr_var * Igg_var - Irg_var * Irg_var
+
+        I_cov = Irr_inv * Irr_var + Irg_inv * Irg_var + Irb_inv * Irb_var
+        Irr_inv /= I_cov
+        Irg_inv /= I_cov
+        Irb_inv /= I_cov
+        Igg_inv /= I_cov
+        Igb_inv /= I_cov
+        Ibb_inv /= I_cov
+
+        self._Irr_inv = Irr_inv
+        self._Irg_inv = Irg_inv
+        self._Irb_inv = Irb_inv
+        self._Igg_inv = Igg_inv
+        self._Igb_inv = Igb_inv
+        self._Ibb_inv = Ibb_inv
+
+    def _computeCoefficients(self, p):
+        r = self._radius
+        I = self._I
+        Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]
+
+        p_mean = cv2.blur(p, (r, r))
+
+        Ipr_mean = cv2.blur(Ir * p, (r, r))
+        Ipg_mean = cv2.blur(Ig * p, (r, r))
+        Ipb_mean = cv2.blur(Ib * p, (r, r))
+
+        Ipr_cov = Ipr_mean - self._Ir_mean * p_mean
+        Ipg_cov = Ipg_mean - self._Ig_mean * p_mean
+        Ipb_cov = Ipb_mean - self._Ib_mean * p_mean
+
+        ar = self._Irr_inv * Ipr_cov + self._Irg_inv * Ipg_cov + self._Irb_inv * Ipb_cov
+        ag = self._Irg_inv * Ipr_cov + self._Igg_inv * Ipg_cov + self._Igb_inv * Ipb_cov
+        ab = self._Irb_inv * Ipr_cov + self._Igb_inv * Ipg_cov + self._Ibb_inv * Ipb_cov
+        b = p_mean - ar * self._Ir_mean - ag * self._Ig_mean - ab * self._Ib_mean
+
+        ar_mean = cv2.blur(ar, (r, r))
+        ag_mean = cv2.blur(ag, (r, r))
+        ab_mean = cv2.blur(ab, (r, r))
+        b_mean = cv2.blur(b, (r, r))
+
+        return ar_mean, ag_mean, ab_mean, b_mean
+
+    def _computeOutput(self, ab, I):
+        ar_mean, ag_mean, ab_mean, b_mean = ab
+
+        Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]
+
+        q = ar_mean * Ir + ag_mean * Ig + ab_mean * Ib + b_mean
+
+        return q
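For orientation: the vendored module implements the guided filter of He et al. (ECCV 2010), plus the fast variant that computes the per-window coefficients on a downsampled guide and upsamples them afterwards. With box-filtered window means written with bars, the gray-guide case solves

```latex
a_k = \frac{\mathrm{cov}_{\omega_k}(I, p)}{\mathrm{var}_{\omega_k}(I) + \epsilon},
\qquad
b_k = \bar{p}_k - a_k \, \bar{I}_k,
\qquad
q_i = \bar{a}_i \, I_i + \bar{b}_i .
```

GuidedFilterColor solves the analogous per-pixel 3x3 system, with _initFilter precomputing the cofactor-based inverse of the regularized color covariance so _computeCoefficients needs only elementwise arithmetic. A minimal self-guided usage sketch follows (the image path is a placeholder; filtering an image with itself as the guide gives edge-preserving smoothing, which is how apply_guided_filter above uses it):

```python
import cv2

from invokeai.backend.image_util.fast_guided_filter.fast_guided_filter import FastGuidedFilter

img = cv2.imread("input.png")  # placeholder path; any 8-bit 3-channel image
gf = FastGuidedFilter(img, radius=7, epsilon=0.02, scale=4)
smoothed = gf.filter(img)  # uint8 output with the input's original size
```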
components["schemas"]["MaskOutput"]; - latents: components["schemas"]["LatentsOutput"]; - controlnet: components["schemas"]["ControlOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - l2i: components["schemas"]["ImageOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - img_resize: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - face_off: components["schemas"]["FaceOffOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; - string: components["schemas"]["StringOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; img_scale: components["schemas"]["ImageOutput"]; - mul: components["schemas"]["IntegerOutput"]; - conditioning: components["schemas"]["ConditioningOutput"]; - add: components["schemas"]["IntegerOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - float: components["schemas"]["FloatOutput"]; - sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + step_param_easing: components["schemas"]["FloatCollectionOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + infill_tile: components["schemas"]["ImageOutput"]; + mediapipe_face_processor: components["schemas"]["ImageOutput"]; + mask_combine: components["schemas"]["ImageOutput"]; + float_range: components["schemas"]["FloatCollectionOutput"]; boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + div: components["schemas"]["IntegerOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; + mask_from_id: components["schemas"]["ImageOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + lscale: components["schemas"]["LatentsOutput"]; color: components["schemas"]["ColorOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - image: components["schemas"]["ImageOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; metadata: components["schemas"]["MetadataOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + round_float: components["schemas"]["FloatOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + zoe_depth_image_processor: 
components["schemas"]["ImageOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; scheduler: components["schemas"]["SchedulerOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; - integer: components["schemas"]["IntegerOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - float_math: components["schemas"]["FloatOutput"]; - sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; - tile_image_processor: components["schemas"]["ImageOutput"]; img_channel_multiply: components["schemas"]["ImageOutput"]; - tomask: components["schemas"]["ImageOutput"]; - sub: components["schemas"]["IntegerOutput"]; - img_mul: components["schemas"]["ImageOutput"]; + add: components["schemas"]["IntegerOutput"]; + dw_openpose_image_processor: components["schemas"]["ImageOutput"]; + l2i: components["schemas"]["ImageOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; + infill_rgba: components["schemas"]["ImageOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; string_split: components["schemas"]["String2Output"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - infill_lama: components["schemas"]["ImageOutput"]; - lineart_anime_image_processor: components["schemas"]["ImageOutput"]; - float_range: components["schemas"]["FloatCollectionOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - mediapipe_face_processor: components["schemas"]["ImageOutput"]; + show_image: components["schemas"]["ImageOutput"]; + image: components["schemas"]["ImageOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; + img_chan: components["schemas"]["ImageOutput"]; + range_of_size: components["schemas"]["IntegerCollectionOutput"]; + sub: components["schemas"]["IntegerOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + ip_adapter: components["schemas"]["IPAdapterOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + color_correct: components["schemas"]["ImageOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; cv_inpaint: components["schemas"]["ImageOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - range: components["schemas"]["IntegerCollectionOutput"]; - content_shuffle_image_processor: components["schemas"]["ImageOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; - prompt_from_file: components["schemas"]["StringCollectionOutput"]; - noise: components["schemas"]["NoiseOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; merge_metadata: components["schemas"]["MetadataOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - div: components["schemas"]["IntegerOutput"]; - dw_openpose_image_processor: components["schemas"]["ImageOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - show_image: components["schemas"]["ImageOutput"]; + tomask: components["schemas"]["ImageOutput"]; + sdxl_model_loader: 
components["schemas"]["SDXLModelLoaderOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + img_resize: components["schemas"]["ImageOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + dynamic_prompt: components["schemas"]["StringCollectionOutput"]; + img_lerp: components["schemas"]["ImageOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + string: components["schemas"]["StringOutput"]; + save_image: components["schemas"]["ImageOutput"]; + infill_lama: components["schemas"]["ImageOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + mul: components["schemas"]["IntegerOutput"]; calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - float_to_int: components["schemas"]["IntegerOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + img_nsfw: components["schemas"]["ImageOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + blank_image: components["schemas"]["ImageOutput"]; + img_channel_offset: components["schemas"]["ImageOutput"]; + conditioning: components["schemas"]["ConditioningOutput"]; + controlnet: components["schemas"]["ControlOutput"]; sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; + normalbae_image_processor: components["schemas"]["ImageOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + noise: components["schemas"]["NoiseOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + prompt_from_file: components["schemas"]["StringCollectionOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + float_math: components["schemas"]["FloatOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; merge_tiles_to_image: components["schemas"]["ImageOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + float: components["schemas"]["FloatOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; lora_selector: components["schemas"]["LoRASelectorOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; + invert_tensor_mask: 
components["schemas"]["MaskOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; + random_range: components["schemas"]["IntegerCollectionOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; + canvas_paste_back: components["schemas"]["ImageOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + string_replace: components["schemas"]["StringOutput"]; + integer: components["schemas"]["IntegerOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + pidi_image_processor: components["schemas"]["ImageOutput"]; face_identifier: components["schemas"]["ImageOutput"]; - save_image: components["schemas"]["ImageOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; - string_split_neg: components["schemas"]["StringPosNegOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - float_collection: components["schemas"]["FloatCollectionOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + lresize: components["schemas"]["LatentsOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + latents: components["schemas"]["LatentsOutput"]; lora_loader: components["schemas"]["LoRALoaderOutput"]; - random_range: components["schemas"]["IntegerCollectionOutput"]; - invert_tensor_mask: components["schemas"]["MaskOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - string_join: components["schemas"]["StringOutput"]; - img_hue_adjust: components["schemas"]["ImageOutput"]; - img_nsfw: components["schemas"]["ImageOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - mask_combine: components["schemas"]["ImageOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; + img_blur: components["schemas"]["ImageOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; main_model_loader: components["schemas"]["ModelLoaderOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - string_replace: components["schemas"]["StringOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; - canvas_paste_back: components["schemas"]["ImageOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - mask_from_id: components["schemas"]["ImageOutput"]; - segment_anything_processor: components["schemas"]["ImageOutput"]; - normalbae_image_processor: components["schemas"]["ImageOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; - 
dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - color_correct: components["schemas"]["ImageOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - step_param_easing: components["schemas"]["FloatCollectionOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + string_join: components["schemas"]["StringOutput"]; }; /** * InvocationStartedEvent @@ -12823,6 +12823,13 @@ export type components = { * @default 1 */ down_sampling_rate?: number; + /** + * Mode + * @description The controlnet tile model being used + * @default regular + * @enum {string} + */ + mode?: "regular" | "blur" | "var" | "super"; /** * type * @default tile_image_processor