You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
C:\A启动器\.ext\lib\site-packages\pytorch_lightning\utilities\distributed.py:258: LightningDeprecationWarning: `pytorch_lightning.utilities.distributed.rank_zero_only` has been deprecated in v1.8.1 and will be removed in v2.0.0. You can import it from `pytorch_lightning.utilities` instead.
rank_zero_deprecation(
Launching Web UI with arguments: --skip-torch-cuda-test --use-directml --medvram-sdxl --theme dark --opt-sub-quad-attention --precision full --upcast-sampling --no-half-vae --api --autolaunch
ONNX: selected=DmlExecutionProvider, available=['DmlExecutionProvider', 'CPUExecutionProvider']
==============================================================================
You are running torch 1.13.1+cu117.
The program is tested to work with torch 2.1.2.
To reinstall the desired version, run with commandline flag --reinstall-torch.
Beware that this will cause a lot of large files to be downloaded, as well as
there are reports of issues with training tab on the latest version.
Use --skip-version-check commandline argument to disable this check.
==============================================================================
Loading weights [a4e2e83962] from C:\A启动器\models\Stable-diffusion\anything-V2.1-pruned-fp16.safetensors
Creating model from config: C:\A启动器\configs\v1-inference.yaml
Running on local URL: http://127.0.0.1:7860
To create a public link, set`share=True`in`launch()`.Startup time: 7.7s (prepare environment: 11.7s, initialize shared: 2.3s, other imports: 0.1s, load scripts: 2.9s, create ui: 0.8s, gradio launch: 0.4s, add APIs: 0.5s).Applying attention optimization: sub-quadratic... done.Model loaded in 12.0s (load weights from disk: 0.8s, create model: 1.9s, apply weights to model: 6.5s, apply dtype to VAE: 1.5s, move model to device: 0.2s, calculate empty prompt: 1.0s).*** Error completing request*** Arguments: ('task(1mha1z1sum49stl)', <gradio.routes.Request object at 0x00000255AB474D90>, '1girl', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {} Traceback (most recent call last): File "C:\A启动器\modules\call_queue.py", line 57, in f res = list(func(*args, **kwargs)) File "C:\A启动器\modules\call_queue.py", line 36, in f res = func(*args, **kwargs) File "C:\A启动器\modules\txt2img.py", line 110, in txt2img processed = processing.process_images(p) File "C:\A启动器\modules\processing.py", line 787, in process_images res = process_images_inner(p) File "C:\A启动器\modules\processing.py", line 1015, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "C:\A启动器\modules\processing.py", line 1351, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "C:\A启动器\modules\sd_samplers_kdiffusion.py", line 239, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, 
**extra_params_kwargs)) File "C:\A启动器\modules\sd_samplers_common.py", line 261, in launch_samplingreturnfunc() File "C:\A启动器\modules\sd_samplers_kdiffusion.py", line 239, in<lambda> samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) File "C:\A启动器\.ext\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_contextreturn func(*args, **kwargs) File "C:\A启动器\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m denoised = model(x, sigmas[i] * s_in, **extra_args) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\modules\sd_samplers_cfg_denoiser.py", line 237, in forward x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs) File "C:\A启动器\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_epsreturn self.inner_model.apply_model(*args, **kwargs) File "C:\A启动器\modules\sd_hijack_utils.py", line 18, in<lambda> setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) File "C:\A启动器\modules\sd_hijack_utils.py", line 30, in __call__return self.__sub_func(self.__orig_func, *args, **kwargs) File "C:\A启动器\modules\sd_hijack_unet.py", line 48, in apply_modelreturn orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float() File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model x_recon = self.model(x_noisy, t, **cond) File 
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward out = self.diffusion_model(x, t, context=cc) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\modules\sd_unet.py", line 91, in UNetModel_forwardreturn original_forward(self, x, timesteps, context, *args, **kwargs) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 802, in forward h = module(h, emb, context) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 82, in forward x = layer(x, emb) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 249, in forwardreturn checkpoint( File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpointreturn CheckpointFunction.apply(func, len(inputs), *args) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward output_tensors = ctx.run_function(*ctx.input_tensors) File "C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 262, in _forward h = self.in_layers(x) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\container.py", line 204, in forward input = module(input) File 
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_implreturn forward_call(*input, **kwargs) File "C:\A启动器\extensions-builtin\Lora\networks.py", line 515, in network_Conv2d_forwardreturn originals.Conv2d_forward(self, input) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\conv.py", line 463, in forwardreturn self._conv_forward(input, self.weight, self.bias) File "C:\A启动器\.ext\lib\site-packages\torch\nn\modules\conv.py", line 459, in _conv_forwardreturn F.conv2d(input, weight, bias, self.stride, File "C:\A启动器\modules\dml\amp\autocast_mode.py", line 43, in<lambda> setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: forward(op, args, kwargs)) File "C:\A启动器\modules\dml\amp\autocast_mode.py", line 15, in forwardreturn op(*args, **kwargs) RuntimeError---
Additional information
No response
The text was updated successfully, but these errors were encountered:
Checklist
What happened?
The process is interrupted while running model inference to generate an image.
Graphics card: Intel UHD
Steps to reproduce the problem
run the user.bat
What should have happened?
The process should not be interrupted while running model inference to generate an image.
What browsers do you use to access the UI?
Google Chrome
Sysinfo
{
"Platform": "Windows-10-10.0.19041-SP0",
"Python": "3.10.11",
"Version": "1.8.0-RC",
"Commit": "25a3b6cbeea8a07afd5e4594afc2f1c79f41ac1a",
"Script path": "C:\A启动器",
"Data path": "C:\A启动器",
"Extensions dir": "C:\A启动器\extensions",
"Checksum": "7c2934e3c179bb518dfe1924b3227f4e62a343abd418168eeda03fed90b58854",
"Commandline": [
"C:\A启动器\launch.py",
"--skip-torch-cuda-test",
"--use-directml",
"--medvram-sdxl",
"--theme",
"dark",
"--opt-sub-quad-attention",
"--precision",
"full",
"--upcast-sampling",
"--no-half-vae",
"--api",
"--autolaunch"
],
"Torch env info": {
"torch_version": "1.13.1+cu117",
"is_debug_build": "False",
"cuda_compiled_version": "11.7",
"gcc_version": null,
"clang_version": null,
"cmake_version": null,
"os": "Microsoft Windows 10 企业版",
"libc_version": "N/A",
"python_version": "3.10.11 | packaged by Anaconda, Inc. | (main, May 16 2023, 00:55:32) [MSC v.1916 64 bit (AMD64)] (64-bit runtime)",
"python_platform": "Windows-10-10.0.19041-SP0",
"is_cuda_available": "False",
"cuda_runtime_version": null,
"cuda_module_loading": "N/A",
"nvidia_driver_version": null,
"nvidia_gpu_models": null,
"cudnn_version": null,
"pip_version": "pip3",
"pip_packages": [
"numpy==1.26.2",
"open-clip-torch==2.20.0",
"pytorch-lightning==1.9.4",
"torch==1.13.1+cu117",
"torch-directml==0.1.13.1.dev230413",
"torchdiffeq==0.2.3",
"torchmetrics==1.3.1",
"torchsde==0.2.6",
"torchvision==0.14.1+cu117"
],
"conda_packages": null,
"hip_compiled_version": "N/A",
"hip_runtime_version": "N/A",
"miopen_runtime_version": "N/A",
"caching_allocator_config": "",
"is_xnnpack_available": "True"
},
"Exceptions": [
{
"exception": "",
"traceback": [
[
"C:\A启动器\modules\call_queue.py, line 57, f",
"res = list(func(*args, **kwargs))"
],
[
"C:\A启动器\modules\call_queue.py, line 36, f",
"res = func(*args, **kwargs)"
],
[
"C:\A启动器\modules\txt2img.py, line 110, txt2img",
"processed = processing.process_images(p)"
],
[
"C:\A启动器\modules\processing.py, line 787, process_images",
"res = process_images_inner(p)"
],
[
"C:\A启动器\modules\processing.py, line 1015, process_images_inner",
"samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)"
],
[
"C:\A启动器\modules\processing.py, line 1351, sample",
"samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))"
],
[
"C:\A启动器\modules\sd_samplers_kdiffusion.py, line 239, sample",
"samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))"
],
[
"C:\A启动器\modules\sd_samplers_common.py, line 261, launch_sampling",
"return func()"
],
[
"C:\A启动器\modules\sd_samplers_kdiffusion.py, line 239, ",
"samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\autograd\grad_mode.py, line 27, decorate_context",
"return func(*args, **kwargs)"
],
[
"C:\A启动器\repositories\k-diffusion\k_diffusion\sampling.py, line 594, sample_dpmpp_2m",
"denoised = model(x, sigmas[i] * s_in, **extra_args)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\modules\sd_samplers_cfg_denoiser.py, line 237, forward",
"x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\repositories\k-diffusion\k_diffusion\external.py, line 112, forward",
"eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)"
],
[
"C:\A启动器\repositories\k-diffusion\k_diffusion\external.py, line 138, get_eps",
"return self.inner_model.apply_model(*args, **kwargs)"
],
[
"C:\A启动器\modules\sd_hijack_utils.py, line 18, ",
"setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))"
],
[
"C:\A启动器\modules\sd_hijack_utils.py, line 30, call",
"return self.__sub_func(self.__orig_func, *args, **kwargs)"
],
[
"C:\A启动器\modules\sd_hijack_unet.py, line 48, apply_model",
"return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py, line 858, apply_model",
"x_recon = self.model(x_noisy, t, **cond)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py, line 1335, forward",
"out = self.diffusion_model(x, t, context=cc)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\modules\sd_unet.py, line 91, UNetModel_forward",
"return original_forward(self, x, timesteps, context, *args, **kwargs)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py, line 802, forward",
"h = module(h, emb, context)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py, line 82, forward",
"x = layer(x, emb)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py, line 249, forward",
"return checkpoint("
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py, line 121, checkpoint",
"return CheckpointFunction.apply(func, len(inputs), *args)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py, line 136, forward",
"output_tensors = ctx.run_function(*ctx.input_tensors)"
],
[
"C:\A启动器\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py, line 262, _forward",
"h = self.in_layers(x)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\container.py, line 204, forward",
"input = module(input)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\module.py, line 1194, _call_impl",
"return forward_call(*input, **kwargs)"
],
[
"C:\A启动器\extensions-builtin\Lora\networks.py, line 515, network_Conv2d_forward",
"return originals.Conv2d_forward(self, input)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\conv.py, line 463, forward",
"return self._conv_forward(input, self.weight, self.bias)"
],
[
"C:\A启动器\.ext\lib\site-packages\torch\nn\modules\conv.py, line 459, _conv_forward",
"return F.conv2d(input, weight, bias, self.stride,"
],
[
"C:\A启动器\modules\dml\amp\autocast_mode.py, line 43, ",
"setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: forward(op, args, kwargs))"
],
[
"C:\A启动器\modules\dml\amp\autocast_mode.py, line 15, forward",
"return op(*args, **kwargs)"
]
]
}
],
"CPU": {
"model": "Intel64 Family 6 Model 190 Stepping 0, GenuineIntel",
"count logical": 4,
"count physical": 4
},
"RAM": {
"total": "16GB",
"used": "13GB",
"free": "3GB"
},
"GPU": {
"model": "Intel(R) UHD Graphics",
"total_memory": 7689924608
},
"Extensions": [],
"Inactive extensions": [],
"Environment": {
"CLIP_PACKAGE": "git+https://gitcode.net/overbill1683/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1",
"GRADIO_ANALYTICS_ENABLED": "False",
"INDEX_URL": "",
"OPENCLIP_PACKAGE": "git+https://gitcode.net/overbill1683/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b",
"PYTHONPATH": "C:\A启动器\.launcher\pyinterop.hfkx1kkk0g4q7.zip;C:\A启动器\.launcher\pyvendors.ph76f3ddzrvzc.zip;C:\A启动器"
},
"Config": {
"ldsr_steps": 100,
"ldsr_cached": false,
"SCUNET_tile": 256,
"SCUNET_tile_overlap": 8,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"SWIN_torch_compile": false,
"hypertile_enable_unet": false,
"hypertile_enable_unet_secondpass": false,
"hypertile_max_depth_unet": 3,
"hypertile_max_tile_unet": 256,
"hypertile_swap_size_unet": 3,
"hypertile_enable_vae": false,
"hypertile_max_depth_vae": 3,
"hypertile_max_tile_vae": 128,
"hypertile_swap_size_vae": 3,
"sd_model_checkpoint": "anything-V2.1-pruned-fp16.safetensors [a4e2e83962]",
"sd_checkpoint_hash": "a4e2e839627cb4a0e12239a319d5bbd2a9fe250599827211c213da5bd8319e8a"
},
"Startup": {
"total": 7.657195329666138,
"records": {
"initial startup": 0.005000591278076172,
"prepare environment/checks": 0.0,
"prepare environment/git version info": 0.14800167083740234,
"prepare environment/clone repositores": 0.41476988792419434,
"prepare environment/run extensions installers": 0.0,
"prepare environment": 11.748481512069702,
"launcher": 0.003000974655151367,
"import torch": 0.0010106563568115234,
"import gradio": 0.0,
"setup paths": 0.0009870529174804688,
"import ldm": 0.0040013790130615234,
"import sgm": 0.0,
"initialize shared": 2.3489620685577393,
"other imports": 0.11570477485656738,
"opts onchange": 0.0009961128234863281,
"setup SD model": 0.0009713172912597656,
"setup codeformer": 0.0029993057250976562,
"setup gfpgan": 0.026032209396362305,
"set samplers": 0.0,
"list extensions": 0.003966331481933594,
"restore config state file": 0.0,
"list SD models": 0.0020008087158203125,
"list localizations": 0.0009996891021728516,
"load scripts/custom_code.py": 0.008005380630493164,
"load scripts/img2imgalt.py": 0.0009958744049072266,
"load scripts/loopback.py": 0.0009999275207519531,
"load scripts/outpainting_mk_2.py": 0.0,
"load scripts/poor_mans_outpainting.py": 0.0010328292846679688,
"load scripts/postprocessing_caption.py": 0.0009965896606445312,
"load scripts/postprocessing_codeformer.py": 0.0,
"load scripts/postprocessing_create_flipped_copies.py": 0.0010001659393310547,
"load scripts/postprocessing_focal_crop.py": 0.0010004043579101562,
"load scripts/postprocessing_gfpgan.py": 0.0,
"load scripts/postprocessing_split_oversized.py": 0.0010106563568115234,
"load scripts/postprocessing_upscale.py": 0.0009589195251464844,
"load scripts/processing_autosized_crop.py": 0.0,
"load scripts/prompt_matrix.py": 0.0009989738464355469,
"load scripts/prompts_from_file.py": 0.0,
"load scripts/sd_upscale.py": 0.0010142326354980469,
"load scripts/xyz_grid.py": 0.002984285354614258,
"load scripts/ldsr_model.py": 2.3553531169891357,
"load scripts/lora_script.py": 0.28450727462768555,
"load scripts/scunet_model.py": 0.04968881607055664,
"load scripts/swinir_model.py": 0.043997764587402344,
"load scripts/hotkey_config.py": 0.0010006427764892578,
"load scripts/extra_options_section.py": 0.0019731521606445312,
"load scripts/hypertile_script.py": 0.10006999969482422,
"load scripts/hypertile_xyz.py": 0.001004934310913086,
"load scripts/soft_inpainting.py": 0.0,
"load scripts/comments.py": 0.046996116638183594,
"load scripts/refiner.py": 0.0010035037994384766,
"load scripts/seed.py": 0.0009682178497314453,
"load scripts": 2.907561779022217,
"load upscalers": 0.009033441543579102,
"refresh VAE": 0.001966714859008789,
"refresh textual inversion templates": 0.0,
"scripts list_optimizers": 0.0030286312103271484,
"scripts list_unets": 0.0,
"reload hypernetworks": 0.005004405975341797,
"initialize extra networks": 0.021974563598632812,
"scripts before_ui_callback": 0.008992433547973633,
"create ui": 0.8062641620635986,
"gradio launch": 0.35697317123413086,
"add APIs": 0.45499157905578613,
"app_started_callback/html_resources": 0.0009996891021728516,
"app_started_callback/lora_script.py": 0.0009999275207519531,
"app_started_callback": 0.0019996166229248047
}
},
"Packages": [
"accelerate==0.21.0",
"aenum==3.1.15",
"aiofiles==23.2.1",
"aiohttp==3.9.3",
"aiosignal==1.3.1",
"alembic==1.13.1",
"altair==5.2.0",
"antlr4-python3-runtime==4.9.3",
"anyio==3.7.1",
"async-timeout==4.0.3",
"attrs==23.2.0",
"blendmodes==2022",
"certifi==2024.2.2",
"charset-normalizer==3.3.2",
"clean-fid==0.1.35",
"click==8.1.7",
"clip==1.0",
"colorama==0.4.6",
"coloredlogs==15.0.1",
"colorlog==6.8.2",
"contourpy==1.2.0",
"cycler==0.12.1",
"datasets==2.18.0",
"deprecation==2.1.0",
"diffusers==0.26.3",
"dill==0.3.8",
"einops==0.4.1",
"exceptiongroup==1.2.0",
"facexlib==0.3.0",
"fastapi==0.94.0",
"ffmpy==0.3.2",
"filelock==3.13.1",
"filterpy==1.4.5",
"flatbuffers==24.3.7",
"fonttools==4.49.0",
"frozenlist==1.4.1",
"fsspec==2024.2.0",
"ftfy==6.1.3",
"gitdb==4.0.11",
"gitpython==3.1.32",
"gradio-client==0.5.0",
"gradio==3.41.2",
"greenlet==3.0.3",
"h11==0.12.0",
"httpcore==0.15.0",
"httpx==0.24.1",
"huggingface-hub==0.21.4",
"humanfriendly==10.0",
"idna==3.6",
"imageio==2.34.0",
"importlib-metadata==7.0.2",
"importlib-resources==6.1.3",
"inflection==0.5.1",
"jinja2==3.1.3",
"jsonmerge==1.8.0",
"jsonschema-specifications==2023.12.1",
"jsonschema==4.21.1",
"kiwisolver==1.4.5",
"kornia==0.6.7",
"lark==1.1.2",
"lazy-loader==0.3",
"lightning-utilities==0.10.1",
"llvmlite==0.42.0",
"mako==1.3.2",
"markupsafe==2.1.5",
"matplotlib==3.8.3",
"mpmath==1.3.0",
"multidict==6.0.5",
"multiprocess==0.70.16",
"networkx==3.2.1",
"numba==0.59.0",
"numpy==1.26.2",
"olive-ai==0.5.0",
"omegaconf==2.2.3",
"onnx==1.15.0",
"onnxruntime-directml==1.17.1",
"onnxruntime==1.17.1",
"open-clip-torch==2.20.0",
"opencv-python==4.9.0.80",
"optimum==1.17.1",
"optuna==3.5.0",
"orjson==3.9.15",
"packaging==24.0",
"pandas==2.2.1",
"piexif==1.1.3",
"pillow==9.5.0",
"pip==23.3.1",
"protobuf==3.20.3",
"psutil==5.9.5",
"pyarrow-hotfix==0.6",
"pyarrow==15.0.1",
"pydantic==1.10.14",
"pydub==0.25.1",
"pyparsing==3.1.2",
"pyreadline3==3.4.1",
"python-dateutil==2.9.0.post0",
"python-multipart==0.0.9",
"pytorch-lightning==1.9.4",
"pytz==2024.1",
"pywavelets==1.5.0",
"pyyaml==6.0.1",
"referencing==0.33.0",
"regex==2023.12.25",
"requests==2.31.0",
"resize-right==0.0.2",
"rpds-py==0.18.0",
"safetensors==0.4.2",
"scikit-image==0.21.0",
"scipy==1.12.0",
"semantic-version==2.10.0",
"sentencepiece==0.2.0",
"setuptools==68.2.2",
"six==1.16.0",
"smmap==5.0.1",
"sniffio==1.3.1",
"spandrel==0.1.6",
"sqlalchemy==2.0.28",
"starlette==0.26.1",
"sympy==1.12",
"tifffile==2024.2.12",
"timm==0.9.16",
"tokenizers==0.13.3",
"tomesd==0.1.3",
"toolz==0.12.1",
"torch-directml==0.1.13.1.dev230413",
"torch==1.13.1+cu117",
"torchdiffeq==0.2.3",
"torchmetrics==1.3.1",
"torchsde==0.2.6",
"torchvision==0.14.1+cu117",
"tqdm==4.66.2",
"trampoline==0.1.2",
"transformers==4.30.2",
"typing-extensions==4.10.0",
"tzdata==2024.1",
"urllib3==2.2.1",
"uvicorn==0.28.0",
"wcwidth==0.2.13",
"websockets==11.0.3",
"wheel==0.41.2",
"xxhash==3.4.1",
"yarl==1.9.4",
"zipp==3.17.0"
]
}
Console logs
Additional information
No response
The text was updated successfully, but these errors were encountered: