add all the old services
parent 2672190fd2
commit 5f813f4b1e
34 services/background-removal-dis.py Normal file
@@ -0,0 +1,34 @@
from services import Stack


class BGRemovalDIS(Stack):
    def __init__(self):
        super().__init__(
            'BGRemovalDIS',
            'bg-remove-dis-rocm',
            5005,
            'https://huggingface.co/spaces/ECCV2022/dis-background-removal'
        )

    def install(self):
        self.git_clone(url=self.url, dest="webui")
        self.install_requirements("webui/requirements.txt")
        self.pip_install("gradio")  # gradio is not in requirements.txt for some reason

        self.remove_line_in_file("os.", "webui/app.py")  # remove manual clone of DIS from app.py (done below)

        self.git_clone("https://github.com/xuebinqin/DIS.git", dest="tmp-dis")
        self.move_all_files_in_dir("tmp-dis/IS-Net", "webui")
        self.remove_dir("tmp-dis")

        self.create_dir("webui/saved_models")
        self.move_file_or_dir("webui/isnet.pth", "webui/saved_models/isnet.pth")

        # self.remove_dir("webui/.git")  # saves a lot of space due to big repo

        super().install()

    def start(self):
        args = ["--port", str(self.port)]
        self.python(f"app.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
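All six services in this commit subclass Stack from the services package, which is not itself part of the diff. The sketch below is a hypothetical reconstruction of that interface, inferred purely from the calls made in these files; every signature and body is a guess at the intended behaviour, not the actual implementation.

import subprocess


class Stack:
    """Hypothetical base class; names inferred from the subclasses in this commit."""

    def __init__(self, name: str, stack_id: str, port: int, url: str):
        self.name = name          # display name, e.g. 'ComfyUI'
        self.stack_id = stack_id  # install/container id, e.g. 'comfyui-rocm'
        self.port = port          # port the service will listen on
        self.url = url            # upstream repo or space to clone

    def shell(self, cmd: str, env: list[str] | None = None, cwd: str | None = None):
        # Entries like "TORCH_BLAS_PREFER_HIPBLASLT=0" are prefixed onto the command line.
        prefix = " ".join(env) + " " if env else ""
        subprocess.run(prefix + cmd, shell=True, check=True, cwd=cwd)

    def git_clone(self, url: str, dest: str, branch: str | None = None):
        branch_arg = f"--branch {branch} " if branch else ""
        self.shell(f"git clone {branch_arg}{url} {dest}")

    def pip(self, cmd: str, env: list[str] | None = None, path: str | None = None):
        self.shell(f"pip {cmd}", env=env, cwd=path)

    def pip_install(self, packages, no_deps: bool = False):
        pkgs = [packages] if isinstance(packages, str) else list(packages)
        self.pip("install " + ("--no-deps " if no_deps else "") + " ".join(pkgs))

    def install_requirements(self, path: str):
        self.pip(f"install -r {path}")

    def python(self, cmd: str, current_dir: str | None = None, env: list[str] | None = None):
        self.shell(f"python {cmd}", env=env, cwd=current_dir)

    def install(self):
        # Subclasses call super().install() last; shared post-install steps would live here.
        pass

    # File and directory helpers used by the subclasses (remove_line_in_file,
    # replace_line_in_file, move_all_files_in_dir, move_file_or_dir, create_dir,
    # remove_dir, install_from_prebuilt) are omitted from this sketch.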
34 services/comfyui.py Normal file
@@ -0,0 +1,34 @@
from services import Stack


class ComfyUI(Stack):
    def __init__(self):
        super().__init__(
            'ComfyUI',
            'comfyui-rocm',
            5004,
            'https://github.com/comfyanonymous/ComfyUI.git'
        )

    def install(self):
        # Install the webui
        self.git_clone(url=self.url, dest="webui")
        self.install_requirements("webui/requirements.txt")

        # Install the manager
        self.git_clone(url="https://github.com/ltdrdata/ComfyUI-Manager.git", dest="webui/custom_nodes/manager")

        # Add GGUF support
        self.pip_install(["gguf", "numpy==1.26.4"])

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")
        self.git_clone(url="https://github.com/comfyanonymous/ComfyUI_bitsandbytes_NF4.git",
                       dest="webui/custom_nodes/ComfyUI_bitsandbytes_NF4")

        super().install()

    def start(self):
        args = ["--port", str(self.port)]
        self.python(f"main.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
27 services/stablediffusion-forge.py Normal file
@@ -0,0 +1,27 @@
from services import Stack


class StableDiffusionForge(Stack):
    def __init__(self):
        super().__init__(
            'StableDiffusion Forge WebUI',
            'stablediffusion-forge-rocm',
            5003,
            'https://github.com/lllyasviel/stable-diffusion-webui-forge'
        )

    def install(self):
        # Install the webui
        self.git_clone(url=self.url, dest="webui")

        self.python("launch.py --skip-torch-cuda-test --exit", current_dir="webui")

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")

        super().install()

    def start(self):
        args = ["--listen", "--enable-insecure-extension-access", "--port", str(self.port)]
        self.python(f"launch.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
27 services/stablediffusion-webui.py Normal file
@@ -0,0 +1,27 @@
from services import Stack


class StableDiffusionWebUI(Stack):  # renamed from StableDiffusionForge: copy-paste slip, duplicate class name
    def __init__(self):
        super().__init__(
            'StableDiffusion WebUI',
            'stablediffusion-webui-rocm',
            5002,
            'https://github.com/AUTOMATIC1111/stable-diffusion-webui'
        )

    def install(self):
        # Install the webui
        self.git_clone(url=self.url, branch="dev", dest="webui")

        self.python("launch.py --skip-torch-cuda-test --exit", current_dir="webui")

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")

        super().install()

    def start(self):
        args = ["--listen", "--enable-insecure-extension-access", "--port", str(self.port)]
        self.python(f"launch.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
56 services/txtgen.py Normal file
@@ -0,0 +1,56 @@
from services import Stack


class TextGeneration(Stack):
    def __init__(self):
        super().__init__(
            'Text Generation',
            'text-generation-rocm',
            5000,
            'https://github.com/oobabooga/text-generation-webui/'
        )

    def install(self):
        # Install LlamaCpp from prebuilt
        self.pip("install llama-cpp-python", ["CMAKE_ARGS=\"-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS\""])  # cpu

        # Install LlamaCpp for ROCM from source
        # self.pip("install llama-cpp-python", ["CMAKE_ARGS=\"-DGGML_HIPBLAS=on\" FORCE_CMAKE=1"])  # manual gpu (only works if whole rocm suite installed)
        # self.install_from_prebuilt("llama_cpp_python")  # gpu (only works if whole rocm suite installed)

        # Install Triton for ROCM from prebuilt
        # self.install_from_prebuilt("triton")

        # Install Triton for ROCM from source
        # self.git_clone(url="https://github.com/ROCmSoftwarePlatform/triton.git")
        # self.pip_install(['ninja', 'cmake'])
        # self.pip("install -e .", path="triton")

        # Install the webui
        self.git_clone(url=self.url, dest="webui")
        self.remove_line_in_file(["accelerate", "lm_eval", "optimum", "autoawq", "llama_cpp_python"],
                                 "../text-generation-rocm/webui/requirements_amd.txt")
        self.install_requirements("../text-generation-rocm/webui/requirements_amd.txt")
        self.pip_install(["accelerate", "optimum"])
        self.pip_install(
            "https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.7/autoawq_kernels-0.0.7+rocm571-cp310-cp310-linux_x86_64.whl",
            no_deps=True)
        self.pip_install(
            "https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.6/autoawq-0.2.6-cp310-cp310-linux_x86_64.whl",
            no_deps=True)
        # Fix llama trying to use cuda version
        self.remove_line_in_file("llama_cpp_cuda", "../text-generation-rocm/webui/modules/llama_cpp_python_hijack.py")

        # Install useful packages
        self.pip_install(
            "https://github.com/turboderp/exllamav2/releases/download/v0.1.9/exllamav2-0.1.9+rocm6.1.torch2.4.0-cp310-cp310-linux_x86_64.whl")
        self.install_from_prebuilt("bitsandbytes")
        self.pip(
            "install auto-gptq --no-build-isolation --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm573/")

        super().install()

    def start(self):
        args = ["--listen", "--listen-port", str(self.port)]
        self.python(f"server.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
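For context, the active and commented-out llama-cpp-python lines above follow that package's documented convention of steering its compile-at-install step through CMAKE_ARGS. A sketch of the two equivalent shell invocations, assuming the pip helper simply prefixes its env list onto the command (flag sets taken verbatim from the file above):

import subprocess

# CPU build with OpenBLAS (the active line in install() above).
subprocess.run(
    'CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python',
    shell=True, check=True)

# ROCm/HIP build (the commented-out alternative; per the file's own note,
# it only works with the whole ROCm suite installed).
subprocess.run(
    'CMAKE_ARGS="-DGGML_HIPBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python',
    shell=True, check=True)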
38 services/xtts.py Normal file
@@ -0,0 +1,38 @@
from services import Stack


class XTTS(Stack):
    def __init__(self):
        super().__init__(
            'XTTS WebUI',
            'xtts-rocm',
            5001,
            'https://github.com/daswer123/xtts-webui'
        )

    def install(self):
        # Install the webui
        self.git_clone(url=self.url, dest="webui")

        self.remove_line_in_file("torch", "webui/requirements.txt")
        self.install_requirements("webui/requirements.txt")

        # sed -i 's/device = "cuda" if torch.cuda.is_available() else "cpu"/device = "cpu"/' webui/scripts/utils/formatter.py
        # sed -i 's/asr_model = WhisperModel(whisper_model, device=device, compute_type="float16")/asr_model = WhisperModel(whisper_model, device=device, compute_type="int8")/' webui/scripts/utils/formatter.py

        # Disable gpu for faster-whisper as ROCM isn't supported yet
        self.replace_line_in_file("device = \"cuda\" if torch.cuda.is_available() else \"cpu\"", "device = \"cpu\"",
                                  "webui/scripts/utils/formatter.py")
        self.replace_line_in_file("asr_model = WhisperModel(whisper_model, device=device, compute_type=\"float16\")",
                                  "asr_model = WhisperModel(whisper_model, device=device, compute_type=\"int8\")",
                                  "webui/scripts/utils/formatter.py")

        # Deepspeed and ninja (not working yet)
        # self.pip_install(["ninja", "deepspeed"])

        super().install()

    def start(self):
        args = ["--host", "0.0.0.0", "--port", str(self.port)]
        self.python(f"server.py {' '.join(args)}", current_dir="webui",
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"])
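Taken together, every service in this commit follows the same two-phase contract: install() provisions the tool, start() launches it on the class's port. A hypothetical driver, not part of this commit — the import assumes services/comfyui.py is importable as a module; hyphenated filenames such as services/background-removal-dis.py would need importlib instead:

from services.comfyui import ComfyUI

if __name__ == "__main__":
    service = ComfyUI()
    service.install()  # clone the repo, install requirements and custom nodes
    service.start()    # blocks; the webui listens on port 5004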