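"""One-step Stable Diffusion inpainting pipeline.

`OneStepLaplacianInpaintPipeline` seeds its initial latents with the
Laplacian-pyramid high-frequency component of the input image's VAE latents
(see `prepare_latents`) and denoises at a single fixed timestep
(`single_timestep`), keeping the scheduler's `pred_original_sample` as the
result.
"""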
import random
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    LCMScheduler,
    StableDiffusionInpaintPipeline,
    UNet2DConditionModel,
)
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput
from diffusers.models import AsymmetricAutoencoderKL
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import BaseOutput, deprecate
from torch.utils.data import DataLoader, TensorDataset
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import pil_to_tensor, resize
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
def create_laplacian_pyramid_kernel(device):
    """Create the Gaussian kernel for the Laplacian pyramid.

    Args:
        device: Computation device.

    Returns:
        Gaussian kernel tensor of shape [1, 1, 3, 3].
    """
    kernel = torch.tensor(
        [
            [0.0625, 0.125, 0.0625],
            [0.125, 0.25, 0.125],
            [0.0625, 0.125, 0.0625],
        ],
        device=device,
    ).unsqueeze(0).unsqueeze(0)
    return kernel
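# The 3x3 kernel above is the normalised binomial filter (outer product of
# [1, 2, 1] / 4 with itself) commonly used to build Gaussian/Laplacian pyramids;
# its entries sum to 1, so blurring preserves the mean of the latents.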
def apply_laplacian_highpass(latents, level=1):
    """Apply Laplacian pyramid high-frequency extraction to VAE latents."""
    device = latents.device
    batch_size, channels, height, width = latents.size()

    # Create Gaussian kernel for grouped (per-channel) convolution
    base_kernel = create_laplacian_pyramid_kernel(device).to(dtype=latents.dtype)
    conv_kernel = base_kernel.repeat(channels, 1, 1, 1)  # [C, 1, 3, 3]

    highpass_latents = torch.zeros_like(latents)
    current_tensor = latents
    for l in range(level):
        # Gaussian blur (independent per channel)
        blurred = F.conv2d(
            current_tensor,
            conv_kernel,
            padding=1,
            groups=channels,
        )
        # Calculate the high-frequency component at this pyramid level
        highfreq = current_tensor - blurred
        # Downsample for the next level
        current_tensor = F.avg_pool2d(blurred, kernel_size=2)
        # Accumulate high-frequency components
        if l == 0:
            highpass_latents = highfreq
        else:
            # Upsample back to the original resolution and add
            highpass_latents += F.interpolate(
                highfreq,
                size=(height, width),
                mode="bilinear",
                align_corners=False,
            )
    return highpass_latents
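# Shape sketch: for standard SD latents of shape [B, 4, 64, 64],
# `apply_laplacian_highpass(latents, level=2)` returns a [B, 4, 64, 64] tensor
# holding the level-0 detail plus the bilinearly upsampled level-1 detail.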
class OneStepLaplacianInpaintPipeline(StableDiffusionInpaintPipeline):
    def encode(self, image: torch.Tensor) -> torch.Tensor:
        """
        Encode an RGB image into a latent.

        Args:
            image (`torch.Tensor`):
                Input RGB image to be encoded.

        Returns:
            `torch.Tensor`: Image latent.
        """
        # encode
        h = self.vae.encoder(image)
        moments = self.vae.quant_conv(h)
        mean, logvar = torch.chunk(moments, 2, dim=1)
        # scale latent
        rgb_latent = mean * self.vae.config.scaling_factor
        return rgb_latent
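    # Note: `encode` reads `self.vae.encoder` / `self.vae.quant_conv` directly and
    # returns the scaled posterior *mean*, so encoding is deterministic (no sampling
    # from the latent distribution, unlike `vae.encode(...).latent_dist.sample()`).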
    def prepare_mask_latents(
        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
    ):
        # resize the mask to the latents' shape as we concatenate the mask to the latents;
        # do this before converting to dtype to avoid breaking when using cpu_offload
        # with half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        masked_image = masked_image.to(device=device, dtype=dtype)

        if masked_image.shape[1] == 4:
            masked_image_latents = masked_image
        else:
            masked_image_latents = self.encode(masked_image)

        # duplicate mask and masked_image_latents for each generation per prompt, using an mps-friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
        masked_image_latents = (
            torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
        )

        # align the device to prevent device errors when concatenating with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
        return mask, masked_image_latents
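    # `prepare_mask_latents` returns the mask downsampled to latent resolution plus the
    # masked-image latents; both are concatenated channel-wise with the noisy latents to
    # form the 9-channel input expected by inpainting UNets (see step 8 in `__call__`).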
    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
        image=None,
        timestep=None,
        is_strength_max=True,
        return_noise=False,
        return_image_latents=False,
    ):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if (image is None or timestep is None) and not is_strength_max:
            raise ValueError(
                "Since strength < 1, the initial latents must be initialised as a combination of image + noise."
                " However, either the image or the noise timestep has not been provided."
            )

        if return_image_latents or (latents is None and not is_strength_max):
            image = image.to(device=device, dtype=dtype)

            if image.shape[1] == 4:
                image_latents = image
            else:
                image_latents = self.encode(image=image)
            # keep only the high-frequency structure of the image latents
            image_latents = apply_laplacian_highpass(image_latents, level=2)
            image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

        if latents is None:
            noise = torch.zeros_like(image_latents)
            # if strength is 1, initialise the latents to noise; otherwise to image + noise
            latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
            # if pure noise, scale the initial latents by the scheduler's init sigma
            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
        else:
            noise = latents.to(device)
            latents = noise * self.scheduler.init_noise_sigma

        outputs = (latents,)

        if return_noise:
            outputs += (noise,)

        if return_image_latents:
            outputs += (image_latents,)

        return outputs
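    # Unlike the stock diffusers implementation, `prepare_latents` above uses zeros
    # instead of Gaussian noise and filters the image latents with
    # `apply_laplacian_highpass`, so the single denoising step starts from the
    # (noise-scaled) high-frequency structure of the input image.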
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        mask_image: PipelineImageInput = None,
        masked_image_latents: torch.Tensor = None,
        canny_image: PipelineImageInput = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        padding_mask_crop: Optional[int] = None,
        strength: float = 0.999,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: int = None,
        single_timestep: int = 1000,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
| r""" | |
| The call function to the pipeline for generation. | |
| Args: | |
| prompt (`str` or `List[str]`, *optional*): | |
| The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. | |
| image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): | |
| `Image`, numpy array or tensor representing an image batch to be inpainted (which parts of the image to | |
| be masked out with `mask_image` and repainted according to `prompt`). For both numpy array and pytorch | |
| tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the | |
| expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the | |
| expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but | |
| if passing latents directly it is not encoded again. | |
| mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): | |
| `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask | |
| are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, | |
| H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, | |
| 1)`, or `(H, W)`. | |
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            padding_mask_crop (`int`, *optional*, defaults to `None`):
                The size of the margin in the crop applied to the image and mask. If `None`, no crop is applied to
                the image and mask_image. If `padding_mask_crop` is not `None`, it first finds a rectangular region
                with the same aspect ratio as the image that contains all of the masked area, then expands that
                region by `padding_mask_crop`. The image and mask_image are then cropped to the expanded region
                before being resized to the original image size for inpainting. This is useful when the masked area
                is small while the image is large and contains information irrelevant for inpainting, such as
                background.
            strength (`float`, *optional*, defaults to 0.999):
                Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as
                a starting point, and more noise is added the higher the `strength`. The number of denoising steps
                depends on the amount of noise initially added. When `strength` is 1, the added noise is maximal and
                the denoising process runs for the full number of iterations specified in `num_inference_steps`. A
                value of 1 essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter is modulated by `strength`.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps`
                argument in their `set_timesteps` method. If not defined, the default behavior when
                `num_inference_steps` is passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`] and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting).
                If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP-Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the
                number of IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`.
                It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If
                not provided, embeddings are computed from the `ip_adapter_image` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            single_timestep (`int`, *optional*, defaults to 1000):
                The fixed diffusion timestep at which the single denoising step is performed; `single_timestep - 1`
                becomes the only entry of `timesteps`.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end
                of each denoising step during inference with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include
                a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as the `callback_kwargs` argument. You will only be able to include variables listed
                in the `._callback_tensor_inputs` attribute of your pipeline class.
        Examples:

        ```py
        >>> import PIL
        >>> import requests
        >>> import torch
        >>> from io import BytesIO

        >>> from diffusers import StableDiffusionInpaintPipeline


        >>> def download_image(url):
        ...     response = requests.get(url)
        ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


        >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
        >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

        >>> init_image = download_image(img_url).resize((512, 512))
        >>> mask_image = download_image(mask_url).resize((512, 512))

        >>> pipe = StableDiffusionInpaintPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
        ```
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated
                images and the second element is a list of `bool`s indicating whether the corresponding generated
                image contains "not-safe-for-work" (nsfw) content.
        """
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs
        self.check_inputs(
            prompt,
            image,
            mask_image,
            height,
            width,
            strength,
            callback_steps,
            output_type,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
            padding_mask_crop,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False
        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
            clip_skip=self.clip_skip,
        )
        # For classifier-free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes.
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_images_per_prompt,
                self.do_classifier_free_guidance,
            )
        # 4. Set timesteps: a single fixed timestep for the one-step pipeline
        timesteps = torch.tensor([single_timestep - 1])
        # check that the number of inference steps is not < 1 - as this doesn't make sense
        if num_inference_steps < 1:
            raise ValueError(
                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
                f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
            )
        # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        # create a boolean to check if strength is set to 1; if so, initialise the latents with pure noise
        is_strength_max = strength == 1.0
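        # Note: `timesteps` is built from `single_timestep` alone, so the denoising
        # loop below runs exactly once regardless of `num_inference_steps`;
        # `num_inference_steps` only sizes the progress bar and the warmup arithmetic.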
        # 5. Preprocess mask and image
        if padding_mask_crop is not None:
            crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
            resize_mode = "fill"
        else:
            crops_coords = None
            resize_mode = "default"

        original_image = image
        init_image = self.image_processor.preprocess(
            image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
        )
        init_image = init_image.to(dtype=torch.float32)
        # 6. Prepare latent variables
        num_channels_latents = self.vae.config.latent_channels
        num_channels_unet = self.unet.config.in_channels
        return_image_latents = num_channels_unet == 9

        latents_outputs = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            image=init_image,
            timestep=latent_timestep,
            is_strength_max=is_strength_max,
            return_noise=True,
            return_image_latents=return_image_latents,
        )
        if return_image_latents:
            latents, noise, image_latents = latents_outputs
        else:
            latents, noise = latents_outputs
        org_latents = latents
        # 7. Prepare mask latent variables
        mask_condition = self.mask_processor.preprocess(
            mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
        )

        if masked_image_latents is None:
            masked_image = init_image * (1 - mask_condition)
        else:
            masked_image = masked_image_latents

        mask, masked_image_latents = self.prepare_mask_latents(
            mask_condition,
            masked_image,
            batch_size * num_images_per_prompt,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            self.do_classifier_free_guidance,
        )
        # 8. Check that sizes of mask, masked image and latents match
        if num_channels_unet == 9:
            # default case for runwayml/stable-diffusion-inpainting
            num_channels_mask = mask.shape[1]
            num_channels_masked_image = masked_image_latents.shape[1]
            if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
                raise ValueError(
                    f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                    f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
                    f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
                    f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the"
                    " config of `pipeline.unet` or your `mask_image` or `image` input."
                )
        elif num_channels_unet != 4 and canny_image is None:
            raise ValueError(
                f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
            )
        # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 9.1 Add image embeds for IP-Adapter
        added_cond_kwargs = (
            {"image_embeds": image_embeds}
            if ip_adapter_image is not None or ip_adapter_image_embeds is not None
            else None
        )

        # 9.2 Optionally get guidance scale embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)
        # 10. Denoising loop (a single iteration, since `timesteps` holds one fixed timestep)
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier-free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # concat latents, mask, masked_image_latents in the channel dimension
                if num_channels_unet == 9:
                    latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
                # elif num_channels_unet == 10 and canny_image is not None:
                #     latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents, canny_image], dim=1)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                # instead of the usual x_t -> x_{t-1} update, keep the scheduler's
                # one-step prediction of the clean sample
                latents = self.scheduler.step(noise_pred, t, latents)
                latents = latents.pred_original_sample
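                # `scheduler.step(...)` with the default `return_dict=True` returns a
                # scheduler output object; the DDPM/DDIM/LCM outputs all expose
                # `pred_original_sample`, the model's direct estimate of the clean latents.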
                # init_mask, _ = mask.chunk(2)
                # latents = (1 - init_mask) * image_latents + init_mask * latents
                if num_channels_unet == 4:
                    init_latents_proper = image_latents
                    if self.do_classifier_free_guidance:
                        init_mask, _ = mask.chunk(2)
                    else:
                        init_mask = mask

                    if i < len(timesteps) - 1:
                        noise_timestep = timesteps[i + 1]
                        init_latents_proper = self.scheduler.add_noise(
                            init_latents_proper, noise, torch.tensor([noise_timestep])
                        )

                    latents = (1 - init_mask) * init_latents_proper + init_mask * latents

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    mask = callback_outputs.pop("mask", mask)
                    masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)
        if not output_type == "latent":
            condition_kwargs = {}
            if isinstance(self.vae, AsymmetricAutoencoderKL):
                init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
                init_image_condition = init_image.clone()
                init_image = self.encode(init_image)
                mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
                condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
            # pass the AsymmetricAutoencoderKL conditioning (an empty, no-op dict for a standard VAE)
            image = self.vae.decode(
                latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs
            )[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        if padding_mask_crop is not None:
            image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
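

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original Space code):
    # exercises the one-step pipeline with the standard 9-channel inpainting
    # checkpoint and the example images from the `__call__` docstring above.
    from io import BytesIO

    import requests

    def _download_image(url):
        response = requests.get(url)
        return PIL.Image.open(BytesIO(response.content)).convert("RGB")

    img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
    mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
    init_image = _download_image(img_url).resize((512, 512))
    mask_image = _download_image(mask_url).resize((512, 512))

    pipe = OneStepLaplacianInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    result = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        num_inference_steps=1,  # the denoising loop runs once regardless
    ).images[0]
    result.save("one_step_inpaint.png")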