Is anyone else hitting a CUDA out-of-memory error like this when running the sd-webui-inpaint-anything extension (during VAE encoding in `process_images`)? Full traceback below:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/gradio/routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1431, in process_api
result = await self.call_function(
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1117, in call_function
prediction = await utils.async_iteration(iterator)
File "/usr/local/lib/python3.10/dist-packages/gradio/utils.py", line 350, in async_iteration
return await iterator.__anext__()
File "/usr/local/lib/python3.10/dist-packages/gradio/utils.py", line 343, in __anext__
return await anyio.to_thread.run_sync(
File "/usr/local/lib/python3.10/dist-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/usr/local/lib/python3.10/dist-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/usr/local/lib/python3.10/dist-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/usr/local/lib/python3.10/dist-packages/gradio/utils.py", line 326, in run_sync_iterator_async
return next(iterator)
File "/usr/local/lib/python3.10/dist-packages/gradio/utils.py", line 695, in gen_wrapper
yield from f(*args, **kwargs)
File "/content/sdw/extensions/sd-webui-inpaint-anything/ia_threading.py", line 119, in yield_wrapper
yield from func(*args, **kwargs)
File "/content/sdw/extensions/sd-webui-inpaint-anything/scripts/inpaint_anything.py", line 794, in run_webui_inpaint
processed = process_images(p)
File "/content/sdw/modules/processing.py", line 734, in process_images
res = process_images_inner(p)
File "/content/sdw/extensions/ControlNet/scripts/batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "/content/sdw/modules/processing.py", line 804, in process_images_inner
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
File "/content/sdw/modules/processing.py", line 1495, in init
self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
File "/content/sdw/modules/sd_samplers_common.py", line 110, in images_tensor_to_samples
x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
File "/content/sdw/modules/sd_hijack_utils.py", line 17, in <lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "/content/sdw/modules/sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/sdw/repositories/stable-diffusion-stability-ai/ldm/models/diffusion/ddpm.py", line 830, in encode_first_stage
return self.first_stage_model.encode(x)
File "/content/sdw/repositories/stable-diffusion-stability-ai/ldm/models/autoencoder.py", line 83, in encode
h = self.encoder(x)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/content/sdw/repositories/stable-diffusion-stability-ai/ldm/modules/diffusionmodules/model.py", line 536, in forward
h = self.mid.attn_1(h)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/content/sdw/modules/sd_hijack_optimizations.py", line 649, in sdp_attnblock_forward
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 10.97 GiB (GPU 0; 15.73 GiB total capacity; 8.84 GiB already allocated; 4.92 GiB free; 10.54 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF