from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch

from diffusers import StableDiffusionXLPipeline
from diffusers.models.attention import BasicTransformerBlock
from diffusers.models.unet_2d_blocks import (
    CrossAttnDownBlock2D,
    CrossAttnUpBlock2D,
    DownBlock2D,
    UpBlock2D,
)
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.utils import PIL_INTERPOLATION, logging
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import UniPCMultistepScheduler
        >>> from diffusers.utils import load_image

        >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")

        >>> pipe = StableDiffusionXLReferencePipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0",
        ...     torch_dtype=torch.float16,
        ...     use_safetensors=True,
        ...     variant="fp16").to('cuda:0')

        >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

        >>> result_img = pipe(ref_image=input_image,
        ...     prompt="1girl",
        ...     num_inference_steps=20,
        ...     reference_attn=True,
        ...     reference_adain=True).images[0]

        >>> result_img.show()
        ```
"""


def torch_dfs(model: torch.nn.Module):
    """Return `model` and all of its sub-modules in depth-first order."""
    result = [model]
    for child in model.children():
        result += torch_dfs(child)
    return result


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the guidance result (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original guidance result by factor `guidance_rescale` to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg
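
# With guidance_rescale=0.0 the CFG prediction is returned unchanged; with guidance_rescale=1.0 it is
# fully rescaled so that its per-sample standard deviation matches the text-conditioned prediction.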


class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
    def _default_height_width(self, height, width, image):
        # NOTE: images in a list may have different dimensions, so checking only the first image
        # is not strictly correct, but it keeps things simple.
        while isinstance(image, list):
            image = image[0]

        if height is None:
            if isinstance(image, PIL.Image.Image):
                height = image.height
            elif isinstance(image, torch.Tensor):
                height = image.shape[2]

            height = (height // 8) * 8  # round down to the nearest multiple of 8

        if width is None:
            if isinstance(image, PIL.Image.Image):
                width = image.width
            elif isinstance(image, torch.Tensor):
                width = image.shape[3]

            width = (width // 8) * 8  # round down to the nearest multiple of 8

        return height, width
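
    # Height and width are snapped down to multiples of 8 because the VAE downsamples by a factor of 8,
    # keeping the latent spatial dimensions integral and compatible with the UNet.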

    def prepare_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        if not isinstance(image, torch.Tensor):
            if isinstance(image, PIL.Image.Image):
                image = [image]

            if isinstance(image[0], PIL.Image.Image):
                images = []

                for image_ in image:
                    image_ = image_.convert("RGB")
                    image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
                    image_ = np.array(image_)
                    image_ = image_[None, :]
                    images.append(image_)

                image = images

                image = np.concatenate(image, axis=0)
                image = np.array(image).astype(np.float32) / 255.0
                image = (image - 0.5) / 0.5
                image = image.transpose(0, 3, 1, 2)
                image = torch.from_numpy(image)

            elif isinstance(image[0], torch.Tensor):
                image = torch.stack(image, dim=0)

        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

    def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
        refimage = refimage.to(device=device)
        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
            self.upcast_vae()
            refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
        if refimage.dtype != self.vae.dtype:
            refimage = refimage.to(dtype=self.vae.dtype)
        # encode the reference image into latent space
        if isinstance(generator, list):
            ref_image_latents = [
                self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
                for i in range(batch_size)
            ]
            ref_image_latents = torch.cat(ref_image_latents, dim=0)
        else:
            ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
        ref_image_latents = self.vae.config.scaling_factor * ref_image_latents

        # duplicate reference latents for each generation per prompt, using an mps-friendly method
        if ref_image_latents.shape[0] < batch_size:
            if batch_size % ref_image_latents.shape[0] != 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)

        ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents

        # align device and dtype to avoid errors when concatenating with the latent model input
        ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
        return ref_image_latents
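
    # When classifier-free guidance is enabled, the reference latents are duplicated above so that the
    # "write" pass in __call__ sees the same (unconditional, conditional) batch layout as the denoising pass.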

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        attention_auto_machine_weight: float = 1.0,
        gn_auto_machine_weight: float = 1.0,
        style_fidelity: float = 0.5,
        reference_attn: bool = True,
        reference_adain: bool = True,
    ):
        assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."

        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor
        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # `guidance_scale` is defined like the guidance weight `w` of Eq. (2) in the Imagen paper
        # (https://arxiv.org/pdf/2205.11487.pdf); `guidance_scale = 1` disables classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
        )

        # 4. Preprocess reference image
        ref_image = self.prepare_image(
            image=ref_image,
            width=width,
            height=height,
            batch_size=batch_size * num_images_per_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            dtype=prompt_embeds.dtype,
        )

        # 5. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 6. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare reference latent variables
        ref_image_latents = self.prepare_ref_latents(
            ref_image,
            batch_size * num_images_per_prompt,
            prompt_embeds.dtype,
            device,
            generator,
            do_classifier_free_guidance,
        )

        # 8. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 9. Hook self-attention blocks and group-norm blocks
        MODE = "write"
        uc_mask = (
            torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
            .type_as(ref_image_latents)
            .bool()
        )
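
        # The hooked forwards defined below run in two modes: in "write" mode (the extra UNet pass on the
        # noised reference latents) they cache self-attention features and group-norm statistics, and in
        # "read" mode (the actual denoising pass) they inject those cached values. `uc_mask` marks the
        # unconditional half of the CFG batch so `style_fidelity` can blend it back toward the
        # un-referenced result.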

        def hacked_basic_transformer_inner_forward(
            self,
            hidden_states: torch.FloatTensor,
            attention_mask: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.FloatTensor] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
            timestep: Optional[torch.LongTensor] = None,
            cross_attention_kwargs: Dict[str, Any] = None,
            class_labels: Optional[torch.LongTensor] = None,
        ):
            if self.use_ada_layer_norm:
                norm_hidden_states = self.norm1(hidden_states, timestep)
            elif self.use_ada_layer_norm_zero:
                norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                    hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
                )
            else:
                norm_hidden_states = self.norm1(hidden_states)

            # 1. Self-Attention
            cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
            if self.only_cross_attention:
                attn_output = self.attn1(
                    norm_hidden_states,
                    encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                    attention_mask=attention_mask,
                    **cross_attention_kwargs,
                )
            else:
                if MODE == "write":
                    # cache the normalized hidden states of the reference pass
                    self.bank.append(norm_hidden_states.detach().clone())
                    attn_output = self.attn1(
                        norm_hidden_states,
                        encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                        attention_mask=attention_mask,
                        **cross_attention_kwargs,
                    )
                if MODE == "read":
                    if attention_auto_machine_weight > self.attn_weight:
                        # attend over the current hidden states concatenated with the cached reference states
                        attn_output_uc = self.attn1(
                            norm_hidden_states,
                            encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
                            **cross_attention_kwargs,
                        )
                        attn_output_c = attn_output_uc.clone()
                        if do_classifier_free_guidance and style_fidelity > 0:
                            attn_output_c[uc_mask] = self.attn1(
                                norm_hidden_states[uc_mask],
                                encoder_hidden_states=norm_hidden_states[uc_mask],
                                **cross_attention_kwargs,
                            )
                        attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
                        self.bank.clear()
                    else:
                        attn_output = self.attn1(
                            norm_hidden_states,
                            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
                            attention_mask=attention_mask,
                            **cross_attention_kwargs,
                        )
            if self.use_ada_layer_norm_zero:
                attn_output = gate_msa.unsqueeze(1) * attn_output
            hidden_states = attn_output + hidden_states

            if self.attn2 is not None:
                norm_hidden_states = (
                    self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
                )

                # 2. Cross-Attention
                attn_output = self.attn2(
                    norm_hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=encoder_attention_mask,
                    **cross_attention_kwargs,
                )
                hidden_states = attn_output + hidden_states

            # 3. Feed-forward
            norm_hidden_states = self.norm3(hidden_states)

            if self.use_ada_layer_norm_zero:
                norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

            ff_output = self.ff(norm_hidden_states)

            if self.use_ada_layer_norm_zero:
                ff_output = gate_mlp.unsqueeze(1) * ff_output

            hidden_states = ff_output + hidden_states

            return hidden_states

        def hacked_mid_forward(self, *args, **kwargs):
            eps = 1e-6
            x = self.original_forward(*args, **kwargs)
            if MODE == "write":
                if gn_auto_machine_weight >= self.gn_weight:
                    var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
                    self.mean_bank.append(mean)
                    self.var_bank.append(var)
            if MODE == "read":
                if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
                    var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
                    std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
                    mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
                    var_acc = sum(self.var_bank) / float(len(self.var_bank))
                    std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
                    x_uc = (((x - mean) / std) * std_acc) + mean_acc
                    x_c = x_uc.clone()
                    if do_classifier_free_guidance and style_fidelity > 0:
                        x_c[uc_mask] = x[uc_mask]
                    x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
                self.mean_bank = []
                self.var_bank = []
            return x
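
        # Reference AdaIN: in "write" mode the hooked blocks record the spatial mean/variance of the
        # reference features; in "read" mode the current features are re-normalized to those statistics
        # before the usual `style_fidelity` blend on the unconditional half.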

        def hack_CrossAttnDownBlock2D_forward(
            self,
            hidden_states: torch.FloatTensor,
            temb: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
        ):
            eps = 1e-6

            output_states = ()

            for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
                if MODE == "write":
                    if gn_auto_machine_weight >= self.gn_weight:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        self.mean_bank.append([mean])
                        self.var_bank.append([var])
                if MODE == "read":
                    if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
                        mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
                        var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
                        std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
                        hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
                        hidden_states_c = hidden_states_uc.clone()
                        if do_classifier_free_guidance and style_fidelity > 0:
                            hidden_states_c[uc_mask] = hidden_states[uc_mask]
                        hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc

                output_states = output_states + (hidden_states,)

            if MODE == "read":
                self.mean_bank = []
                self.var_bank = []

            if self.downsamplers is not None:
                for downsampler in self.downsamplers:
                    hidden_states = downsampler(hidden_states)

                output_states = output_states + (hidden_states,)

            return hidden_states, output_states

        def hacked_DownBlock2D_forward(self, hidden_states, temb=None):
            eps = 1e-6

            output_states = ()

            for i, resnet in enumerate(self.resnets):
                hidden_states = resnet(hidden_states, temb)

                if MODE == "write":
                    if gn_auto_machine_weight >= self.gn_weight:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        self.mean_bank.append([mean])
                        self.var_bank.append([var])
                if MODE == "read":
                    if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
                        mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
                        var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
                        std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
                        hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
                        hidden_states_c = hidden_states_uc.clone()
                        if do_classifier_free_guidance and style_fidelity > 0:
                            hidden_states_c[uc_mask] = hidden_states[uc_mask]
                        hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc

                output_states = output_states + (hidden_states,)

            if MODE == "read":
                self.mean_bank = []
                self.var_bank = []

            if self.downsamplers is not None:
                for downsampler in self.downsamplers:
                    hidden_states = downsampler(hidden_states)

                output_states = output_states + (hidden_states,)

            return hidden_states, output_states

        def hacked_CrossAttnUpBlock2D_forward(
            self,
            hidden_states: torch.FloatTensor,
            res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
            temb: Optional[torch.FloatTensor] = None,
            encoder_hidden_states: Optional[torch.FloatTensor] = None,
            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
            upsample_size: Optional[int] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            encoder_attention_mask: Optional[torch.FloatTensor] = None,
        ):
            eps = 1e-6
            for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]

                if MODE == "write":
                    if gn_auto_machine_weight >= self.gn_weight:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        self.mean_bank.append([mean])
                        self.var_bank.append([var])
                if MODE == "read":
                    if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
                        mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
                        var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
                        std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
                        hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
                        hidden_states_c = hidden_states_uc.clone()
                        if do_classifier_free_guidance and style_fidelity > 0:
                            hidden_states_c[uc_mask] = hidden_states[uc_mask]
                        hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc

            if MODE == "read":
                self.mean_bank = []
                self.var_bank = []

            if self.upsamplers is not None:
                for upsampler in self.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)

            return hidden_states

        def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
            eps = 1e-6
            for i, resnet in enumerate(self.resnets):
                res_hidden_states = res_hidden_states_tuple[-1]
                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
                hidden_states = resnet(hidden_states, temb)

                if MODE == "write":
                    if gn_auto_machine_weight >= self.gn_weight:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        self.mean_bank.append([mean])
                        self.var_bank.append([var])
                if MODE == "read":
                    if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
                        var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
                        std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
                        mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
                        var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
                        std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
                        hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
                        hidden_states_c = hidden_states_uc.clone()
                        if do_classifier_free_guidance and style_fidelity > 0:
                            hidden_states_c[uc_mask] = hidden_states[uc_mask]
                        hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc

            if MODE == "read":
                self.mean_bank = []
                self.var_bank = []

            if self.upsamplers is not None:
                for upsampler in self.upsamplers:
                    hidden_states = upsampler(hidden_states, upsample_size)

            return hidden_states

        if reference_attn:
            attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
            attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])

            for i, module in enumerate(attn_modules):
                module._original_inner_forward = module.forward
                module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
                module.bank = []
                module.attn_weight = float(i) / float(len(attn_modules))
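
            # attn_weight increases from the coarsest (largest hidden size) transformer blocks to the
            # finest, so lowering `attention_auto_machine_weight` restricts reference attention to the
            # coarser blocks only.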

        if reference_adain:
            gn_modules = [self.unet.mid_block]
            self.unet.mid_block.gn_weight = 0

            # deeper blocks get smaller gn_weight, so they keep participating even when
            # gn_auto_machine_weight is lowered
            down_blocks = self.unet.down_blocks
            for w, module in enumerate(down_blocks):
                module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
                gn_modules.append(module)

            up_blocks = self.unet.up_blocks
            for w, module in enumerate(up_blocks):
                module.gn_weight = float(w) / float(len(up_blocks))
                gn_modules.append(module)

            for i, module in enumerate(gn_modules):
                if getattr(module, "original_forward", None) is None:
                    module.original_forward = module.forward
                if i == 0:
                    # mid_block
                    module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
                elif isinstance(module, CrossAttnDownBlock2D):
                    module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
                elif isinstance(module, DownBlock2D):
                    module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
                elif isinstance(module, CrossAttnUpBlock2D):
                    module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
                elif isinstance(module, UpBlock2D):
                    module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
                module.mean_bank = []
                module.var_bank = []
                module.gn_weight *= 2

        # 10. Prepare added time ids & embeddings
        add_text_embeds = pooled_prompt_embeds
        add_time_ids = self._get_add_time_ids(
            original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
        )

        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        # 11. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        # 11.1 Apply denoising_end
        if denoising_end is not None and isinstance(denoising_end, float) and 0 < denoising_end < 1:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
            timesteps = timesteps[:num_inference_steps]
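
        # Each step runs the UNet twice: a "write" pass on freshly noised reference latents to populate
        # the attention and statistics banks, then a "read" pass on the actual latents that consumes them.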

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}

                # reference pass: noise the reference latents to the current timestep and run the UNet
                # in "write" mode so the hooked blocks cache their features
                noise = randn_tensor(
                    ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
                )
                ref_xt = self.scheduler.add_noise(
                    ref_image_latents,
                    noise,
                    t.reshape(1),
                )
                ref_xt = self.scheduler.scale_model_input(ref_xt, t)

                MODE = "write"
                self.unet(
                    ref_xt,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )

                # predict the noise residual in "read" mode so the cached reference features are injected
                MODE = "read"
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform classifier-free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                if do_classifier_free_guidance and guidance_rescale > 0.0:
                    # based on Section 3.4 of https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # update the progress bar and call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if output_type != "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        # apply watermark if available
        if self.watermark is not None:
            image = self.watermark.apply_watermark(image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        # offload all models
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)