diff --git a/docs/documentation.markdown b/docs/documentation.markdown
index c034f03..aecd4fa 100644
--- a/docs/documentation.markdown
+++ b/docs/documentation.markdown
@@ -172,6 +172,8 @@ You don't have to use the api_utils.py file and pydantic types, but it can save
 
 ## Experimental GPU support
 
+You need a sufficiently recent version of your SD environment. Using the GPU comes with a number of small caveats to understand, but the performance gain is substantial.
+
 In Version 1.2.1, the ability to use the GPU has been added, a setting that can be configured in SD at startup. Currently, this feature is only supported on Windows and Linux, as the necessary dependencies for Mac have not been included.
 
 The `--faceswaplab_gpu` option in SD can be added to the args in webui-user.sh or webui-user.bat.
 
@@ -184,6 +186,16 @@ The `auto_det_size` option emulates the old behavior. It has no difference on CP
 
 If you have enabled the GPU, are sure you have a CUDA-compatible card, and the model keeps using the CPU provider, please check that you have onnxruntime-gpu installed.
 
+### SD.NEXT and GPU
+
+Please read carefully.
+
+Using the GPU requires the onnxruntime-gpu>=1.15.0 dependency. For the moment, this conflicts with older SD.Next dependencies (tensorflow, which uses numpy and potentially rembg). You will need to make sure that numpy>=1.24.2 and tensorflow>=2.13.0 are installed.
+
+You should therefore be prepared to do a little debugging before activating the option. If you don't feel up to it, it's best not to use it.
+
+The first time a swap is run, the program will keep using the CPU but will offer to install the GPU dependencies. You will then need to restart. This is due to the optimizations made by SD.Next to the installation scripts.
+
 ## Settings
 
 You can change the program's default behavior in your webui's global settings (FaceSwapLab section in settings). This is particularly useful if you want to have default options for inpainting or for post-processing, for example.
diff --git a/docs/install.markdown b/docs/install.markdown
index e931f40..e5d58c3 100644
--- a/docs/install.markdown
+++ b/docs/install.markdown
@@ -20,6 +20,12 @@ Before beginning the installation process, if you are using Windows, you need to
 
 3. OR if you don't want to install either the full Visual Studio suite or the VS C++ Build Tools: Follow the instructions provided in section VIII of the documentation.
 
+## SD.Next / Vladmantic
+
+SD.Next's loading optimizations for extension installation scripts can sometimes cause problems. This is particularly the case if you copy the extension manually instead of installing it via the interface.
+
+If you get an error after startup, try restarting the server.
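+
+If restarting is not enough, you can check by hand the dependency versions mentioned in the documentation (onnxruntime-gpu>=1.15.0, numpy>=1.24.2, tensorflow>=2.13.0). The snippet below is only a sketch of such a check, not part of the extension, and it assumes it is run from the webui's own Python environment:
+
+```python
+# Illustrative check only: report whether the packages involved in the
+# SD.Next conflict meet the minimum versions listed in the documentation.
+import pkg_resources
+from packaging.version import parse
+
+REQUIREMENTS = {"onnxruntime-gpu": "1.15.0", "numpy": "1.24.2", "tensorflow": "2.13.0"}
+
+for name, minimum in REQUIREMENTS.items():
+    try:
+        installed = pkg_resources.get_distribution(name).version
+    except pkg_resources.DistributionNotFound:
+        print(f"{name}: not installed (needs >= {minimum})")
+        continue
+    status = "OK" if parse(installed) >= parse(minimum) else f"too old (needs >= {minimum})"
+    print(f"{name}: {installed} {status}")
+```
+
+If one of these packages is missing or too old, reinstall it in that environment and restart the server.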
+
 ## Manual Install
 
 To install the extension, follow the steps below:
diff --git a/install.py b/install.py
index 169a434..e9aa578 100644
--- a/install.py
+++ b/install.py
@@ -5,48 +5,58 @@
 from modules import shared
 from packaging.version import parse
 
-use_gpu = getattr(shared.cmd_opts, "faceswaplab_gpu", False)
-
-if use_gpu and sys.platform != "darwin":
-    req_file = os.path.join(
-        os.path.dirname(os.path.realpath(__file__)), "requirements-gpu.txt"
-    )
-else:
-    req_file = os.path.join(
-        os.path.dirname(os.path.realpath(__file__)), "requirements.txt"
-    )
-
-
-def is_installed(package: str) -> bool:
-    package_name = package.split("==")[0].split(">=")[0].strip()
-    try:
-        installed_version = parse(pkg_resources.get_distribution(package_name).version)
-    except pkg_resources.DistributionNotFound:
-        return False
-
-    if "==" in package:
-        required_version = parse(package.split("==")[1])
-        return installed_version == required_version
-    elif ">=" in package:
-        required_version = parse(package.split(">=")[1])
-        return installed_version >= required_version
-    else:
-        return True
+def check_install() -> None:
+    use_gpu = getattr(
+        shared.cmd_opts, "faceswaplab_gpu", False
+    ) or shared.opts.data.get("faceswaplab_use_gpu", False)
 
-print("Checking faceswaplab requirements")
-with open(req_file) as file:
-    for package in file:
-        try:
-            package = package.strip()
+    if use_gpu and sys.platform != "darwin":
+        req_file = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), "requirements-gpu.txt"
+        )
+    else:
+        req_file = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), "requirements.txt"
+        )
 
-            if not is_installed(package):
-                print(f"Install {package}")
-                launch.run_pip(
-                    f"install {package}", f"sd-webui-faceswaplab requirement: {package}"
+    def is_installed(package: str) -> bool:
+        package_name = package.split("==")[0].split(">=")[0].strip()
+        try:
+            installed_version = parse(
+                pkg_resources.get_distribution(package_name).version
+            )
+        except pkg_resources.DistributionNotFound:
+            return False
+
+        if "==" in package:
+            required_version = parse(package.split("==")[1])
+            return installed_version == required_version
+        elif ">=" in package:
+            required_version = parse(package.split(">=")[1])
+            return installed_version >= required_version
+        else:
+            return True
+
+    print("Checking faceswaplab requirements")
+    with open(req_file) as file:
+        for package in file:
+            try:
+                package = package.strip()
+
+                if not is_installed(package):
+                    print(f"Install {package}")
+                    launch.run_pip(
+                        f"install {package}",
+                        f"sd-webui-faceswaplab requirement: {package}",
+                    )
+
+            except Exception as e:
+                print(e)
+                print(
+                    f"Warning: Failed to install {package}, faceswaplab will not work."
+                )
+                raise e
+
 
-        except Exception as e:
-            print(e)
-            print(f"Warning: Failed to install {package}, faceswaplab will not work.")
-            raise e
+check_install()
diff --git a/scripts/configure.py b/scripts/configure.py
index 74c8b9c..1340690 100644
--- a/scripts/configure.py
+++ b/scripts/configure.py
@@ -1,24 +1,57 @@
 import os
 from tqdm import tqdm
+import traceback
 import urllib.request
 from scripts.faceswaplab_utils.faceswaplab_logging import logger
-from scripts.faceswaplab_swapping.swapper import is_sha1_matching
-from scripts.faceswaplab_utils.models_utils import get_models
 from scripts.faceswaplab_globals import *
 from packaging import version
 import pkg_resources
+import hashlib
 
 ALREADY_DONE = False
 
 
+def check_install() -> None:
+    # Very ugly hack :( due to sdnext optimization not calling install.py every time if git log has not changed
+    import importlib.util
+    import sys
+    import os
+
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    check_install_path = os.path.join(current_dir, "..", "install.py")
+    spec = importlib.util.spec_from_file_location("check_install", check_install_path)
+    check_install = importlib.util.module_from_spec(spec)
+    sys.modules["check_install"] = check_install
+    spec.loader.exec_module(check_install)
+    check_install.check_install()  # type: ignore
+    #### End of ugly hack :( !
+
+
+def is_sha1_matching(file_path: str, expected_sha1: str) -> bool:
+    sha1_hash = hashlib.sha1(usedforsecurity=False)
+    try:
+        with open(file_path, "rb") as file:
+            for byte_block in iter(lambda: file.read(4096), b""):
+                sha1_hash.update(byte_block)
+            if sha1_hash.hexdigest() == expected_sha1:
+                return True
+            else:
+                return False
+    except Exception as e:
+        logger.error(
+            "Failed to check model hash, check the model is valid or has been downloaded adequately : %s",
+            e,
+        )
+        traceback.print_exc()
+        return False
+
+
 def check_configuration() -> None:
     global ALREADY_DONE
     if ALREADY_DONE:
         return
-    logger.info(f"FaceSwapLab {VERSION_FLAG} Config :")
-    # This has been moved here due to pb with sdnext in install.py not doing what a1111 is doing.
 
     models_dir = MODELS_DIR
     faces_dir = FACES_DIR
@@ -48,6 +81,9 @@ def download(url: str, path: str) -> None:
     os.makedirs(models_dir, exist_ok=True)
     os.makedirs(faces_dir, exist_ok=True)
+    if not os.path.exists(model_path):
+        download(model_url, model_path)
+
     if not is_sha1_matching(model_path, EXPECTED_INSWAPPER_SHA1):
         logger.error(
             "Suspicious sha1 for model %s, check the model is valid or has been downloaded adequately. Should be %s",
             model_path,
@@ -63,17 +99,4 @@ def download(url: str, path: str) -> None:
             gradio_version,
         )
 
-    if not os.path.exists(model_path):
-        download(model_url, model_path)
-
-    def print_infos() -> None:
-        logger.info("FaceSwapLab config :")
-        logger.info("+ MODEL DIR : %s", models_dir)
-        models = get_models()
-        logger.info("+ MODELS: %s", models)
-        logger.info("+ FACES DIR : %s", faces_dir)
-        logger.info("+ ANALYZER DIR : %s", ANALYZER_DIR)
-
-    print_infos()
-
     ALREADY_DONE = True
diff --git a/scripts/faceswaplab.py b/scripts/faceswaplab.py
index a8d0b6b..835be58 100644
--- a/scripts/faceswaplab.py
+++ b/scripts/faceswaplab.py
@@ -1,8 +1,11 @@
+from scripts.configure import check_configuration
+
+check_configuration()
+
 import importlib
 import traceback
 from scripts import faceswaplab_globals
-from scripts.configure import check_configuration
 from scripts.faceswaplab_api import faceswaplab_api
 from scripts.faceswaplab_postprocessing import upscaling
 from scripts.faceswaplab_settings import faceswaplab_settings
@@ -12,18 +15,22 @@
 from scripts.faceswaplab_utils.models_utils import get_current_model
 from scripts.faceswaplab_utils.typing import *
 from scripts.faceswaplab_utils.ui_utils import dataclasses_from_flat_list
+from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
 
 # Reload all the modules when using "apply and restart"
 # This is mainly done for development purposes
-importlib.reload(swapper)
-importlib.reload(faceswaplab_logging)
-importlib.reload(faceswaplab_globals)
-importlib.reload(imgutils)
-importlib.reload(upscaling)
-importlib.reload(faceswaplab_settings)
-importlib.reload(models_utils)
-importlib.reload(faceswaplab_unit_ui)
-importlib.reload(faceswaplab_api)
+import logging
+
+if logger.getEffectiveLevel() <= logging.DEBUG:
+    importlib.reload(swapper)
+    importlib.reload(faceswaplab_logging)
+    importlib.reload(faceswaplab_globals)
+    importlib.reload(imgutils)
+    importlib.reload(upscaling)
+    importlib.reload(faceswaplab_settings)
+    importlib.reload(models_utils)
+    importlib.reload(faceswaplab_unit_ui)
+    importlib.reload(faceswaplab_api)
 
 import os
 from pprint import pformat
@@ -46,7 +53,6 @@
     PostProcessingOptions,
 )
 from scripts.faceswaplab_ui.faceswaplab_unit_settings import FaceSwapUnitSettings
-from scripts.faceswaplab_utils.faceswaplab_logging import logger, save_img_debug
 
 EXTENSION_PATH = os.path.join("extensions", "sd-webui-faceswaplab")
 
@@ -67,7 +73,6 @@ class FaceSwapScript(scripts.Script):
 
     def __init__(self) -> None:
         super().__init__()
-        check_configuration()
 
     @property
     def units_count(self) -> int:
diff --git a/scripts/faceswaplab_settings/faceswaplab_settings.py b/scripts/faceswaplab_settings/faceswaplab_settings.py
index 7d715ed..d173e34 100644
--- a/scripts/faceswaplab_settings/faceswaplab_settings.py
+++ b/scripts/faceswaplab_settings/faceswaplab_settings.py
@@ -16,6 +16,16 @@ def on_ui_settings() -> None:
             section=section,
         ),
     )
+    shared.opts.add_option(
+        "faceswaplab_use_gpu",
+        shared.OptionInfo(
+            False,
+            "Use GPU, only for CUDA on Windows/Linux - experimental and risky, can mess up dependencies (requires restart)",
+            gr.Checkbox,
+            {"interactive": True},
+            section=section,
+        ),
+    )
     shared.opts.add_option(
         "faceswaplab_keep_original",
         shared.OptionInfo(
diff --git a/scripts/faceswaplab_swapping/swapper.py b/scripts/faceswaplab_swapping/swapper.py
index 04fa77b..1c3a55b 100644
--- a/scripts/faceswaplab_swapping/swapper.py
+++ b/scripts/faceswaplab_swapping/swapper.py
@@ -9,7 +9,6 @@
 import sys
 from io import StringIO
 from contextlib import contextmanager
-import hashlib
 import cv2
 import insightface
@@ -40,20 +39,44 @@
 from modules import shared
 import onnxruntime
 
-USE_GPU = (
-    getattr(shared.cmd_opts, "faceswaplab_gpu", False) and sys.platform != "darwin"
-)
-providers = ["CPUExecutionProvider"]
-if USE_GPU and sys.platform != "darwin":
-    if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
-        providers = ["CUDAExecutionProvider"]
-    else:
-        logger.error(
-            "CUDAExecutionProvider not found in onnxruntime.available_providers : %s, use CPU instead. Check onnxruntime-gpu is installed.",
-            onnxruntime.get_available_providers(),
-        )
-        USE_GPU = False
+def use_gpu() -> bool:
+    return (
+        getattr(shared.cmd_opts, "faceswaplab_gpu", False)
+        or opts.data.get("faceswaplab_use_gpu", False)
+    ) and sys.platform != "darwin"
+
+
+@lru_cache
+def force_install_gpu_providers() -> None:
+    # Ugly Ugly hack due to SDNEXT :
+    from scripts.configure import check_install
+
+    logger.warning("Trying to reinstall GPU dependencies")
+    check_install()
+    logger.warning("IF onnxruntime-gpu has been installed successfully, RESTART")
+    logger.warning(
+        "On SD.NEXT/vladmantic you will also need to check numpy>=1.24.2 and tensorflow>=2.13.0"
+    )
+
+
+def get_providers() -> List[str]:
+    providers = ["CPUExecutionProvider"]
+    if use_gpu():
+        if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
+            providers = ["CUDAExecutionProvider"]
+        else:
+            logger.error(
+                "CUDAExecutionProvider not found in onnxruntime.available_providers : %s, use CPU instead. Check onnxruntime-gpu is installed.",
+                onnxruntime.get_available_providers(),
+            )
+            force_install_gpu_providers()
+
+    return providers
+
+
+def is_cpu_provider() -> bool:
+    return get_providers() == ["CPUExecutionProvider"]
 
 
 def cosine_similarity_face(face1: Face, face2: Face) -> float:
@@ -281,20 +304,6 @@ def capture_stdout() -> Generator[StringIO, None, None]:
         sys.stdout = original_stdout  # Type: ignore
 
 
-# On GPU we can keep a non prepared model in ram and deepcopy it every time det_size change (old behaviour)
-@lru_cache(maxsize=1)
-def get_cpu_analysis() -> insightface.app.FaceAnalysis:
-    return insightface.app.FaceAnalysis(
-        name="buffalo_l",
-        providers=providers,
-        root=faceswaplab_globals.ANALYZER_DIR,
-    )
-
-
-# FIXME : This function is way more complicated than it could be.
-# It is done that way to preserve the original behavior with CPU.
-# Most users don't reed the doc, so we need to keep the features as close as possible
-# to original behavior.
 @lru_cache(maxsize=3)
 def getAnalysisModel(
     det_size: Tuple[int, int] = (640, 640), det_thresh: float = 0.5
@@ -309,29 +318,26 @@
         if not os.path.exists(faceswaplab_globals.ANALYZER_DIR):
            os.makedirs(faceswaplab_globals.ANALYZER_DIR)
 
+        providers = get_providers()
         logger.info(
-            f"Load analysis model det_size={det_size}, det_thresh={det_thresh}, gpu={USE_GPU}, providers = {providers}, will take some time. (> 30s)"
+            f"Load analysis model det_size={det_size}, det_thresh={det_thresh}, providers = {providers}, will take some time. (> 30s)"
         )
 
         # Initialize the analysis model with the specified name and providers
         with tqdm(
-            total=1, desc="Loading analysis model (first time is slow)", unit="model"
+            total=1,
+            desc=f"Loading {det_size} analysis model (first time is slow)",
+            unit="model",
         ) as pbar:
             with capture_stdout() as captured:
-                if USE_GPU:
-                    model = insightface.app.FaceAnalysis(
-                        name="buffalo_l",
-                        providers=providers,
-                        root=faceswaplab_globals.ANALYZER_DIR,
-                    )
-
-                    # Prepare the analysis model for face detection with the specified detection size
-                    model.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
-                else:
-                    # This is a hacky way to speed up loading for gpu only
-                    model = copy.deepcopy(get_cpu_analysis())
-                    model.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
+                model = insightface.app.FaceAnalysis(
+                    name="buffalo_l",
+                    providers=providers,
+                    root=faceswaplab_globals.ANALYZER_DIR,
+                )
+                # Prepare the analysis model for face detection with the specified detection size
+                model.prepare(ctx_id=0, det_thresh=det_thresh, det_size=det_size)
             pbar.update(1)
         logger.info("%s", pformat(captured.getvalue()))
@@ -343,25 +349,6 @@
         raise FaceModelException("Loading of analysis model failed")
 
 
-def is_sha1_matching(file_path: str, expected_sha1: str) -> bool:
-    sha1_hash = hashlib.sha1(usedforsecurity=False)
-    try:
-        with open(file_path, "rb") as file:
-            for byte_block in iter(lambda: file.read(4096), b""):
-                sha1_hash.update(byte_block)
-            if sha1_hash.hexdigest() == expected_sha1:
-                return True
-            else:
-                return False
-    except Exception as e:
-        logger.error(
-            "Failed to check model hash, check the model is valid or has been downloaded adequately : %e",
-            e,
-        )
-        traceback.print_exc()
-        return False
-
-
 @lru_cache(maxsize=1)
 def getFaceSwapModel(model_path: str) -> upscaled_inswapper.UpscaledINSwapper:
     """
@@ -374,14 +361,7 @@
         insightface.model_zoo.FaceModel: The face swap model.
     """
     try:
-        expected_sha1 = "17a64851eaefd55ea597ee41e5c18409754244c5"
-        if not is_sha1_matching(model_path, expected_sha1):
-            logger.error(
-                "Suspicious sha1 for model %s, check the model is valid or has been downloaded adequately. Should be %s",
-                model_path,
-                expected_sha1,
-            )
-
+        providers = get_providers()
         with tqdm(total=1, desc="Loading swap model", unit="model") as pbar:
             with capture_stdout() as captured:
                 model = upscaled_inswapper.UpscaledINSwapper(
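
With this patch, `get_providers()` in swapper.py only returns `["CUDAExecutionProvider"]` when onnxruntime actually exposes it; otherwise it logs an error, falls back to the CPU provider and calls `force_install_gpu_providers()`. A quick way to see which providers the current environment exposes is the sketch below; it assumes it is run in the same Python environment as the webui and is not code from this patch:

```python
# Minimal sketch (not part of the patch): list the execution providers that
# onnxruntime exposes here. If "CUDAExecutionProvider" is missing, FaceSwapLab
# falls back to the CPU provider and tries to reinstall the GPU dependencies.
import onnxruntime

available = onnxruntime.get_available_providers()
print("Available providers:", available)

if "CUDAExecutionProvider" in available:
    print("CUDA provider found: the swap can run on the GPU.")
else:
    print("CPU only: install onnxruntime-gpu>=1.15.0 and restart the webui.")
```

If the CUDA provider is listed but swaps still run on the CPU, check that the `--faceswaplab_gpu` flag or the `faceswaplab_use_gpu` setting is actually enabled, since `use_gpu()` requires one of them.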