Skip to content

Commit 5aa4f1d

Browse files
committed
remove list, tuple, dict from typing
1 parent 922e273 commit 5aa4f1d

510 files changed

Lines changed: 7517 additions & 7530 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

src/diffusers/callbacks.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Any, Dict, List
1+
from typing import Any
22

33
from .configuration_utils import ConfigMixin, register_to_config
44
from .utils import CONFIG_NAME
@@ -33,13 +33,13 @@ def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None):
3333
raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.")
3434

3535
@property
36-
def tensor_inputs(self) -> List[str]:
36+
def tensor_inputs(self) -> list[str]:
3737
raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}")
3838

39-
def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]:
39+
def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> dict[str, Any]:
4040
raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}")
4141

42-
def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
42+
def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
4343
return self.callback_fn(pipeline, step_index, timestep, callback_kwargs)
4444

4545

@@ -49,14 +49,14 @@ class MultiPipelineCallbacks:
4949
provides a unified interface for calling all of them.
5050
"""
5151

52-
def __init__(self, callbacks: List[PipelineCallback]):
52+
def __init__(self, callbacks: list[PipelineCallback]):
5353
self.callbacks = callbacks
5454

5555
@property
56-
def tensor_inputs(self) -> List[str]:
56+
def tensor_inputs(self) -> list[str]:
5757
return [input for callback in self.callbacks for input in callback.tensor_inputs]
5858

59-
def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
59+
def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
6060
"""
6161
Calls all the callbacks in order with the given arguments and returns the final callback_kwargs.
6262
"""
@@ -76,7 +76,7 @@ class SDCFGCutoffCallback(PipelineCallback):
7676

7777
tensor_inputs = ["prompt_embeds"]
7878

79-
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
79+
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
8080
cutoff_step_ratio = self.config.cutoff_step_ratio
8181
cutoff_step_index = self.config.cutoff_step_index
8282

@@ -109,7 +109,7 @@ class SDXLCFGCutoffCallback(PipelineCallback):
109109
"add_time_ids",
110110
]
111111

112-
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
112+
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
113113
cutoff_step_ratio = self.config.cutoff_step_ratio
114114
cutoff_step_index = self.config.cutoff_step_index
115115

@@ -152,7 +152,7 @@ class SDXLControlnetCFGCutoffCallback(PipelineCallback):
152152
"image",
153153
]
154154

155-
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
155+
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
156156
cutoff_step_ratio = self.config.cutoff_step_ratio
157157
cutoff_step_index = self.config.cutoff_step_index
158158

@@ -195,7 +195,7 @@ class IPAdapterScaleCutoffCallback(PipelineCallback):
195195

196196
tensor_inputs = []
197197

198-
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
198+
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
199199
cutoff_step_ratio = self.config.cutoff_step_ratio
200200
cutoff_step_index = self.config.cutoff_step_index
201201

@@ -219,7 +219,7 @@ class SD3CFGCutoffCallback(PipelineCallback):
219219

220220
tensor_inputs = ["prompt_embeds", "pooled_prompt_embeds"]
221221

222-
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
222+
def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> dict[str, Any]:
223223
cutoff_step_ratio = self.config.cutoff_step_ratio
224224
cutoff_step_index = self.config.cutoff_step_index
225225

src/diffusers/configuration_utils.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
import re
2525
from collections import OrderedDict
2626
from pathlib import Path
27-
from typing import Any, Dict, Optional, Tuple, Union
27+
from typing import Any, Optional, Union
2828

2929
import numpy as np
3030
from huggingface_hub import DDUFEntry, create_repo, hf_hub_download
@@ -94,10 +94,10 @@ class ConfigMixin:
9494
Class attributes:
9595
- **config_name** (`str`) -- A filename under which the config should stored when calling
9696
[`~ConfigMixin.save_config`] (should be overridden by parent class).
97-
- **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be
97+
- **ignore_for_config** (`list[str]`) -- A list of attributes that should not be saved in the config (should be
9898
overridden by subclass).
9999
- **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).
100-
- **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
100+
- **_deprecated_kwargs** (`list[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
101101
should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by
102102
subclass).
103103
"""
@@ -155,7 +155,7 @@ def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool
155155
Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
156156
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
157157
namespace).
158-
kwargs (`Dict[str, Any]`, *optional*):
158+
kwargs (`dict[str, Any]`, *optional*):
159159
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
160160
"""
161161
if os.path.isfile(save_directory):
@@ -189,13 +189,13 @@ def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool
189189

190190
@classmethod
191191
def from_config(
192-
cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs
193-
) -> Union[Self, Tuple[Self, Dict[str, Any]]]:
192+
cls, config: Union[FrozenDict, dict[str, Any]] = None, return_unused_kwargs=False, **kwargs
193+
) -> Union[Self, tuple[Self, dict[str, Any]]]:
194194
r"""
195195
Instantiate a Python class from a config dictionary.
196196
197197
Parameters:
198-
config (`Dict[str, Any]`):
198+
config (`dict[str, Any]`):
199199
A config dictionary from which the Python class is instantiated. Make sure to only load configuration
200200
files of compatible classes.
201201
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
@@ -296,7 +296,7 @@ def load_config(
296296
return_unused_kwargs=False,
297297
return_commit_hash=False,
298298
**kwargs,
299-
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
299+
) -> tuple[dict[str, Any], dict[str, Any]]:
300300
r"""
301301
Load a model or scheduler configuration.
302302
@@ -315,7 +315,7 @@ def load_config(
315315
force_download (`bool`, *optional*, defaults to `False`):
316316
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
317317
cached versions if they exist.
318-
proxies (`Dict[str, str]`, *optional*):
318+
proxies (`dict[str, str]`, *optional*):
319319
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
320320
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
321321
output_loading_info(`bool`, *optional*, defaults to `False`):
@@ -352,7 +352,7 @@ def load_config(
352352
_ = kwargs.pop("mirror", None)
353353
subfolder = kwargs.pop("subfolder", None)
354354
user_agent = kwargs.pop("user_agent", {})
355-
dduf_entries: Optional[Dict[str, DDUFEntry]] = kwargs.pop("dduf_entries", None)
355+
dduf_entries: Optional[dict[str, DDUFEntry]] = kwargs.pop("dduf_entries", None)
356356

357357
user_agent = {**user_agent, "file_type": "config"}
358358
user_agent = http_user_agent(user_agent)
@@ -564,7 +564,7 @@ def extract_init_dict(cls, config_dict, **kwargs):
564564

565565
@classmethod
566566
def _dict_from_json_file(
567-
cls, json_file: Union[str, os.PathLike], dduf_entries: Optional[Dict[str, DDUFEntry]] = None
567+
cls, json_file: Union[str, os.PathLike], dduf_entries: Optional[dict[str, DDUFEntry]] = None
568568
):
569569
if dduf_entries:
570570
text = dduf_entries[json_file].read_text()
@@ -577,12 +577,12 @@ def __repr__(self):
577577
return f"{self.__class__.__name__} {self.to_json_string()}"
578578

579579
@property
580-
def config(self) -> Dict[str, Any]:
580+
def config(self) -> dict[str, Any]:
581581
"""
582582
Returns the config of the class as a frozen dictionary
583583
584584
Returns:
585-
`Dict[str, Any]`: Config of the class.
585+
`dict[str, Any]`: Config of the class.
586586
"""
587587
return self._internal_dict
588588

@@ -637,7 +637,7 @@ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
637637
writer.write(self.to_json_string())
638638

639639
@classmethod
640-
def _get_config_file_from_dduf(cls, pretrained_model_name_or_path: str, dduf_entries: Dict[str, DDUFEntry]):
640+
def _get_config_file_from_dduf(cls, pretrained_model_name_or_path: str, dduf_entries: dict[str, DDUFEntry]):
641641
# paths inside a DDUF file must always be "/"
642642
config_file = (
643643
cls.config_name
@@ -756,7 +756,7 @@ class LegacyConfigMixin(ConfigMixin):
756756
"""
757757

758758
@classmethod
759-
def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
759+
def from_config(cls, config: Union[FrozenDict, dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
760760
# To prevent dependency import problem.
761761
from .models.model_loading_utils import _fetch_remapped_cls_from_config
762762

src/diffusers/guiders/adaptive_projected_guidance.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import math
16-
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
16+
from typing import TYPE_CHECKING, Optional, Union
1717

1818
import torch
1919

@@ -77,8 +77,8 @@ def __init__(
7777
self.momentum_buffer = None
7878

7979
def prepare_inputs(
80-
self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
81-
) -> List["BlockState"]:
80+
self, data: "BlockState", input_fields: Optional[dict[str, Union[str, tuple[str, str]]]] = None
81+
) -> list["BlockState"]:
8282
if input_fields is None:
8383
input_fields = self._input_fields
8484

src/diffusers/guiders/auto_guidance.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import math
16-
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
16+
from typing import TYPE_CHECKING, Any, Optional, Union
1717

1818
import torch
1919

@@ -36,10 +36,10 @@ class AutoGuidance(BaseGuidance):
3636
The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the text
3737
prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation and
3838
deterioration of image quality.
39-
auto_guidance_layers (`int` or `List[int]`, *optional*):
39+
auto_guidance_layers (`int` or `list[int]`, *optional*):
4040
The layer indices to apply skip layer guidance to. Can be a single integer or a list of integers. If not
4141
provided, `skip_layer_config` must be provided.
42-
auto_guidance_config (`LayerSkipConfig` or `List[LayerSkipConfig]`, *optional*):
42+
auto_guidance_config (`LayerSkipConfig` or `list[LayerSkipConfig]`, *optional*):
4343
The configuration for the skip layer guidance. Can be a single `LayerSkipConfig` or a list of
4444
`LayerSkipConfig`. If not provided, `skip_layer_guidance_layers` must be provided.
4545
dropout (`float`, *optional*):
@@ -65,8 +65,8 @@ class AutoGuidance(BaseGuidance):
6565
def __init__(
6666
self,
6767
guidance_scale: float = 7.5,
68-
auto_guidance_layers: Optional[Union[int, List[int]]] = None,
69-
auto_guidance_config: Union[LayerSkipConfig, List[LayerSkipConfig], Dict[str, Any]] = None,
68+
auto_guidance_layers: Optional[Union[int, list[int]]] = None,
69+
auto_guidance_config: Union[LayerSkipConfig, list[LayerSkipConfig], dict[str, Any]] = None,
7070
dropout: Optional[float] = None,
7171
guidance_rescale: float = 0.0,
7272
use_original_formulation: bool = False,
@@ -133,8 +133,8 @@ def cleanup_models(self, denoiser: torch.nn.Module) -> None:
133133
registry.remove_hook(name, recurse=True)
134134

135135
def prepare_inputs(
136-
self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
137-
) -> List["BlockState"]:
136+
self, data: "BlockState", input_fields: Optional[dict[str, Union[str, tuple[str, str]]]] = None
137+
) -> list["BlockState"]:
138138
if input_fields is None:
139139
input_fields = self._input_fields
140140

src/diffusers/guiders/classifier_free_guidance.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import math
16-
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
16+
from typing import TYPE_CHECKING, Optional, Union
1717

1818
import torch
1919

@@ -84,8 +84,8 @@ def __init__(
8484
self.use_original_formulation = use_original_formulation
8585

8686
def prepare_inputs(
87-
self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
88-
) -> List["BlockState"]:
87+
self, data: "BlockState", input_fields: Optional[dict[str, Union[str, tuple[str, str]]]] = None
88+
) -> list["BlockState"]:
8989
if input_fields is None:
9090
input_fields = self._input_fields
9191

src/diffusers/guiders/classifier_free_zero_star_guidance.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import math
16-
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
16+
from typing import TYPE_CHECKING, Optional, Union
1717

1818
import torch
1919

@@ -77,8 +77,8 @@ def __init__(
7777
self.use_original_formulation = use_original_formulation
7878

7979
def prepare_inputs(
80-
self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
81-
) -> List["BlockState"]:
80+
self, data: "BlockState", input_fields: Optional[dict[str, Union[str, tuple[str, str]]]] = None
81+
) -> list["BlockState"]:
8282
if input_fields is None:
8383
input_fields = self._input_fields
8484

src/diffusers/guiders/frequency_decoupled_guidance.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414

1515
import math
16-
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
16+
from typing import TYPE_CHECKING, Optional, Union
1717

1818
import torch
1919

@@ -37,7 +37,7 @@
3737
build_laplacian_pyramid_func = None
3838

3939

40-
def project(v0: torch.Tensor, v1: torch.Tensor, upcast_to_double: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:
40+
def project(v0: torch.Tensor, v1: torch.Tensor, upcast_to_double: bool = True) -> tuple[torch.Tensor, torch.Tensor]:
4141
"""
4242
Project vector v0 onto vector v1, returning the parallel and orthogonal components of v0. Implementation from paper
4343
(Algorithm 2).
@@ -58,7 +58,7 @@ def project(v0: torch.Tensor, v1: torch.Tensor, upcast_to_double: bool = True) -
5858
return v0_parallel, v0_orthogonal
5959

6060

61-
def build_image_from_pyramid(pyramid: List[torch.Tensor]) -> torch.Tensor:
61+
def build_image_from_pyramid(pyramid: list[torch.Tensor]) -> torch.Tensor:
6262
"""
6363
Recovers the data space latents from the Laplacian pyramid frequency space. Implementation from the paper
6464
(Algorithm 2).
@@ -99,19 +99,19 @@ class FrequencyDecoupledGuidance(BaseGuidance):
9999
paper. By default, we use the diffusers-native implementation that has been in the codebase for a long time.
100100
101101
Args:
102-
guidance_scales (`List[float]`, defaults to `[10.0, 5.0]`):
102+
guidance_scales (`list[float]`, defaults to `[10.0, 5.0]`):
103103
The scale parameter for frequency-decoupled guidance for each frequency component, listed from highest
104104
frequency level to lowest. Higher values result in stronger conditioning on the text prompt, while lower
105105
values allow for more freedom in generation. Higher values may lead to saturation and deterioration of
106106
image quality. The FDG authors recommend using higher guidance scales for higher frequency components and
107107
lower guidance scales for lower frequency components (so `guidance_scales` should typically be sorted in
108108
descending order).
109-
guidance_rescale (`float` or `List[float]`, defaults to `0.0`):
109+
guidance_rescale (`float` or `list[float]`, defaults to `0.0`):
110110
The rescale factor applied to the noise predictions. This is used to improve image quality and fix
111111
overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
112112
Flawed](https://huggingface.co/papers/2305.08891). If a list is supplied, it should be the same length as
113113
`guidance_scales`.
114-
parallel_weights (`float` or `List[float]`, *optional*):
114+
parallel_weights (`float` or `list[float]`, *optional*):
115115
Optional weights for the parallel component of each frequency component of the projected CFG shift. If not
116116
set, the weights will default to `1.0` for all components, which corresponds to using the normal CFG shift
117117
(that is, equal weights for the parallel and orthogonal components). If set, a value in `[0, 1]` is
@@ -120,10 +120,10 @@ class FrequencyDecoupledGuidance(BaseGuidance):
120120
Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default,
121121
we use the diffusers-native implementation that has been in the codebase for a long time. See
122122
[~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details.
123-
start (`float` or `List[float]`, defaults to `0.0`):
123+
start (`float` or `list[float]`, defaults to `0.0`):
124124
The fraction of the total number of denoising steps after which guidance starts. If a list is supplied, it
125125
should be the same length as `guidance_scales`.
126-
stop (`float` or `List[float]`, defaults to `1.0`):
126+
stop (`float` or `list[float]`, defaults to `1.0`):
127127
The fraction of the total number of denoising steps after which guidance stops. If a list is supplied, it
128128
should be the same length as `guidance_scales`.
129129
guidance_rescale_space (`str`, defaults to `"data"`):
@@ -141,12 +141,12 @@ class FrequencyDecoupledGuidance(BaseGuidance):
141141
@register_to_config
142142
def __init__(
143143
self,
144-
guidance_scales: Union[List[float], Tuple[float]] = [10.0, 5.0],
145-
guidance_rescale: Union[float, List[float], Tuple[float]] = 0.0,
146-
parallel_weights: Optional[Union[float, List[float], Tuple[float]]] = None,
144+
guidance_scales: Union[list[float], tuple[float]] = [10.0, 5.0],
145+
guidance_rescale: Union[float, list[float], tuple[float]] = 0.0,
146+
parallel_weights: Optional[Union[float, list[float], tuple[float]]] = None,
147147
use_original_formulation: bool = False,
148-
start: Union[float, List[float], Tuple[float]] = 0.0,
149-
stop: Union[float, List[float], Tuple[float]] = 1.0,
148+
start: Union[float, list[float], tuple[float]] = 0.0,
149+
stop: Union[float, list[float], tuple[float]] = 1.0,
150150
guidance_rescale_space: str = "data",
151151
upcast_to_double: bool = True,
152152
):
@@ -218,8 +218,8 @@ def __init__(
218218
)
219219

220220
def prepare_inputs(
221-
self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
222-
) -> List["BlockState"]:
221+
self, data: "BlockState", input_fields: Optional[dict[str, Union[str, tuple[str, str]]]] = None
222+
) -> list["BlockState"]:
223223
if input_fields is None:
224224
input_fields = self._input_fields
225225

0 commit comments

Comments (0)