diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..2656703
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,42 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Recording[[:space:]]2024-04-18[[:space:]]154413.mp4 filter=lfs diff=lfs merge=lfs -text
+input2.mp4 filter=lfs diff=lfs merge=lfs -text
+llll.mp4 filter=lfs diff=lfs merge=lfs -text
+input.mp4 filter=lfs diff=lfs merge=lfs -text
+zdsfsd.mp4 filter=lfs diff=lfs merge=lfs -text
+0521.mp4 filter=lfs diff=lfs merge=lfs -text
+0521(1).mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e310241
--- /dev/null
+++ b/README.md
@@ -0,0 +1,13 @@
+---
+title: Video Background Remover
+emoji: 🎥📷
+colorFrom: green
+colorTo: purple
+sdk: gradio
+sdk_version: 4.31.4
+app_file: app.py
+pinned: true
+license: creativeml-openrail-m
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..d4b1487
--- /dev/null
+++ b/app.py
@@ -0,0 +1,70 @@
+import spaces
+import gradio as gr
+import cv2
+import numpy as np
+import time
+import random
+from PIL import Image
+import torch
+
+# Workaround: disable TorchScript so transparent_background runs without JIT compilation
+torch.jit.script = lambda f: f
+
+from transparent_background import Remover
+
+@spaces.GPU()
+def doo(video, mode, progress=gr.Progress()):
+
+    if mode == 'Fast':
+        remover = Remover(mode='fast')
+    else:
+        remover = Remover()
+
+    cap = cv2.VideoCapture(video)
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Get total frames
+    writer = None
+    tmpname = random.randint(111111111, 999999999)
+    processed_frames = 0
+    start_time = time.time()
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Stop a few seconds before the 20-minute GPU allocation expires and return what was processed
+        if time.time() - start_time >= 20 * 60 - 5:
+            print("GPU Timing Out")
+            cap.release()
+            if writer is not None:
+                writer.release()
+            return str(tmpname) + '.mp4'
+
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        img = Image.fromarray(frame).convert('RGB')
+
+        if writer is None:
+            writer = cv2.VideoWriter(str(tmpname) + '.mp4', cv2.VideoWriter_fourcc(*'mp4v'), cap.get(cv2.CAP_PROP_FPS), img.size)
+
+        processed_frames += 1
+        print(f"Processing frame {processed_frames}")
+        progress(processed_frames / total_frames, desc=f"Processing frame {processed_frames}/{total_frames}")
+        out = remover.process(img, type='green')  # replace the background with a green screen
+        writer.write(cv2.cvtColor(np.array(out), cv2.COLOR_RGB2BGR))
+
+    cap.release()
+    writer.release()
+    return str(tmpname) + '.mp4'
+
+
+description = "The bigger the file, the longer processing takes. Long videos may hit the GPU timeout (abort/error)."
+examples = [['./input2.mp4'], ['./input.mp4']]
+
+iface = gr.Interface(
+    fn=doo,
+    inputs=["video", gr.components.Radio(['Standard', 'Fast'], label='Select mode', value='Standard', info='Standard is more accurate but slower ⏪, while Fast is quicker but less accurate ⏩.')],
+    outputs="video",
+    examples=examples,
+    description=description
+)
+iface.launch()
diff --git a/input.mp4 b/input.mp4
new file mode 100644
index 0000000..87a2918
Binary files /dev/null and b/input.mp4 differ
diff --git a/input2.mp4 b/input2.mp4
new file mode 100644
index 0000000..8107100
Binary files /dev/null and b/input2.mp4 differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..4d7a1a4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+torch
+torchvision
+opencv-python
+timm
+tqdm
+kornia
+gdown
+transparent-background
\ No newline at end of file
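
Note: the per-frame step added in app.py above can be tried on a single image, outside Gradio and outside the video loop. Below is a minimal sketch, assuming a local test image named frame.png (a placeholder, not part of this change); it only reuses the Remover() and process(..., type='green') calls that already appear in app.py.

import numpy as np
from PIL import Image
from transparent_background import Remover  # installed via requirements.txt

# Load one frame as RGB, mirroring what app.py does for each decoded video frame.
img = Image.open('frame.png').convert('RGB')  # 'frame.png' is a hypothetical test file

remover = Remover()                       # Remover(mode='fast') is the quicker, less accurate variant
out = remover.process(img, type='green')  # paint the background green, as in the video loop

# Save the result; np.array() accepts either a PIL image or an ndarray return value.
Image.fromarray(np.array(out)).save('frame_green.png')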