Compare commits

No commits in common. "v2-python" and "main" have entirely different histories.

54 changed files with 707 additions and 1290 deletions

@@ -1,92 +0,0 @@
name: Build and Publish Prebuilt Artifacts
on:
  workflow_dispatch:
    inputs:
      rocm_version:
        description: 'ROCm version'
        required: true
        default: '6.2'
      torch_version:
        description: 'Torch version (e.g., rocm6.2)'
        required: true
        default: 'rocm6.2'
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          tool-cache: false
      # Step 1: Checkout the repository
      - name: Checkout repository
        uses: actions/checkout@v4
      # Step 2: Set up Docker Buildx
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      # Step 3: Build the Docker image
      - name: Build Docker image
        run: |
          echo ${{ inputs.rocm_version }}
          echo ${{ inputs.torch_version }}
          docker build \
            --build-arg ROCM_VERSION=${{ inputs.rocm_version }} \
            --build-arg TORCH_VERSION=${{ inputs.torch_version }} \
            -t prebuilt-wheels ./prebuilts
      # Step 4: Create a container and run the script
      - name: Run Docker container and generate wheels
        run: |
          docker create --name prebuilt-container prebuilt-wheels
          docker start -a prebuilt-container
      # Step 5: Copy bitsandbytes wheel artifact to host
      - name: Copy bitsandbytes wheel to host
        run: |
          mkdir -p /tmp/bitsandbytes/
          docker cp prebuilt-container:/tmp/bitsandbytes/dist/ /tmp/bitsandbytes/
      # Step 6: Copy llama-cpp-python wheel artifact to host
      - name: Copy llama-cpp-python wheel to host
        run: |
          mkdir -p /tmp/llama-cpp-python/
          docker cp prebuilt-container:/tmp/llama-cpp-python/dist/ /tmp/llama-cpp-python/
      # Step 7: Copy xformers wheel artifact to host
      - name: Copy xformers wheel to host
        run: |
          mkdir -p /tmp/xformers/
          docker cp prebuilt-container:/tmp/xformers/dist/ /tmp/xformers/
      # Step 8: Upload bitsandbytes wheel artifact
      - name: Upload bitsandbytes wheel
        uses: actions/upload-artifact@v4
        with:
          name: bitsandbytes-wheels
          path: /tmp/bitsandbytes/dist/*.whl
      # Step 9: Upload llama-cpp-python wheel artifact
      - name: Upload llama-cpp-python wheel
        uses: actions/upload-artifact@v4
        with:
          name: llama-cpp-python-wheels
          path: /tmp/llama-cpp-python/dist/*.whl
      # Step 10: Upload xformers wheel artifact
      - name: Upload xformers wheel
        uses: actions/upload-artifact@v4
        with:
          name: xformers-wheels
          path: /tmp/xformers/dist/*.whl
      # Step 11: Cleanup Docker container
      - name: Cleanup
        run: |
          docker rm prebuilt-container

@@ -1,25 +0,0 @@
name: Build and release
on:
  push:
    branches:
      - main
    tags:
      - v*
jobs:
  pyinstaller-build:
    runs-on: ubuntu-latest
    steps:
      - name: Create Executable
        uses: sayyid5416/pyinstaller@v1
        with:
          python_ver: '3.12'
          spec: 'build.spec'
          requirements: 'requirements.txt'
          upload_exe_with_name: 'ai-suite-rocm-${{ github.ref_name }}'
      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: dist/*

4 .gitignore vendored
@@ -1,3 +1 @@
.idea/
prebuilts/build_output/
__pycache__/

@@ -1,11 +1,6 @@
# ai-suite-rocm-local
This is a simple project to make hosting multiple AI tools easy on Linux with AMD GPUs using ROCm locally (without Docker).
> [!WARNING]
> Currently rewriting this project to be more modular and easier to use. This is a work in progress.
> Instructions below are outdated!
To use it, clone the repo and run the install script for the service you want to use.
@@ -21,5 +16,7 @@ Then you can run whichever service you want using their respective run.sh scripts
./run.sh
```
*This has been tested on Fedora 40 with kernel 6.9.6 with an AMD RX 6800 XT.*
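
For a concrete example of the flow described above (a sketch assuming the GitHub mirror implied by the release URLs in this repo, and using the stablediffusion-rocm service; swap in whichever service folder you want):

```bash
# Clone the suite, then install and start one service (v1 bash workflow)
git clone https://github.com/M4TH1EU/ai-suite-rocm-local.git
cd ai-suite-rocm-local/stablediffusion-rocm

./install.sh   # creates the venv, installs PyTorch for ROCm and the webui
./run.sh       # starts the webui
```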

@@ -0,0 +1,2 @@
venv/
webui/

@@ -0,0 +1,42 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Function to install the background remover
install_background_remover() {
    # As no data is stored in the webui folder, we can re-run the installation process for updates
    rm -Rf webui
    echo "Cloning webui..."
    # original repo (hf): https://huggingface.co/spaces/ECCV2022/dis-background-removal/tree/main
    git clone ssh://git@git.broillet.ch:222/Clone/dis-background-removal.git webui

    echo "Installing requirements..."
    $python_exec -m pip install -r webui/requirements.txt

    echo "Cloning DIS repo"
    git clone ssh://git@git.broillet.ch:222/Clone/DIS.git tmp-dis
    mv tmp-dis/IS-Net/* webui/
    sudo rm -R tmp-dis

    echo "Finalizing..."
    mkdir -p webui/saved_models
    mv webui/isnet.pth webui/saved_models
    sudo rm -R webui/.git
}

# Main function
main() {
    prepare_env

    # Set it up
    install_background_remover

    clean
    echo "BackgroundRemover installation complete."
}

# Run main function
main

@@ -0,0 +1,19 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start BG-remover
    cd webui/
    TORCH_BLAS_PREFER_HIPBLASLT=0 $python_exec app.py
}

main

1 bitsandbytes-rocm-build/.gitignore vendored Normal file
@@ -0,0 +1 @@
build_output/

@@ -0,0 +1,39 @@
FROM rocm/dev-ubuntu-22.04:6.1.2
ENV ROCM_ARCH="gfx1030" \
TORCH_VERSION="rocm6.1" \
DEBIAN_FRONTEND=noninteractive \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8
WORKDIR /tmp
RUN apt-get update -y
RUN apt-get install -y wget git cron cmake make software-properties-common
# Install python3.10
RUN add-apt-repository ppa:deadsnakes/ppa -y && apt-get update -y
RUN apt-get install -y python3.10 python3.10-dev python3.10-venv
ENV VIRTUAL_ENV=/opt/venv
RUN python3.10 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN pip3 install --upgrade pip wheel setuptools
# Install pytorch for rocm
RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/${TORCH_VERSION}
# ROCM bitsandbytes
RUN apt-get install -y hipblas hipblaslt hiprand hipsparse hipcub rocthrust-dev
## Clone repo and install python requirements
RUN git clone --depth 1 -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
WORKDIR /tmp/bitsandbytes
RUN pip3 install -r requirements-dev.txt
## Build
RUN cmake -DCOMPUTE_BACKEND=hip -S . -DBNB_ROCM_ARCH=${ROCM_ARCH}
RUN make
RUN python3.10 setup.py bdist_wheel --universal
# Cleanup
RUN apt-get clean && pip3 cache purge

@@ -0,0 +1,15 @@
# bitsandbytes-rocm-build-pip
This is a simple script to help you build the latest bitsandbytes for ROCm.
The official build process requires a lot of ROCm-related packages and can easily mess up your computer if you install the wrong packages.
This creates a Docker image that builds the package and extracts the built wheel file, which you can then easily install using pip.
```bash
./build.sh
./extract_build.sh
```
The wheel file will be in a folder named ``build_output/``
*You might also find one of my already-built wheels in this folder or on the GitHub releases page (it may not be up to date).*
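
For reference, a minimal sketch of the final install step (the exact wheel filename, and whether `docker cp` left a `dist/` subfolder, depend on the build):

```bash
# Install the extracted bitsandbytes wheel into the current environment
pip install build_output/dist/bitsandbytes-*.whl
```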

@@ -0,0 +1 @@
docker build . -t 'bitsandbytes-rocm-build:6.1.2' -f Dockerfile

@@ -0,0 +1,31 @@
#!/bin/bash

# Set variables
IMAGE_NAME="bitsandbytes-rocm-build:6.1.2"
CONTAINER_NAME="bitsandbytes-rocm-build"
FILE_IN_CONTAINER="/tmp/bitsandbytes/dist/"
FILE_ON_HOST="./build_output/"

# Run the Docker container
docker run -d --name $CONTAINER_NAME $IMAGE_NAME

# Check if the container is running
if [ "$(docker ps -q -f name=$CONTAINER_NAME)" ]; then
    echo "Container $CONTAINER_NAME is running."

    # Copy the file from the container to the host
    docker cp $CONTAINER_NAME:$FILE_IN_CONTAINER $FILE_ON_HOST
    if [ $? -eq 0 ]; then
        echo "File copied successfully to $FILE_ON_HOST"
    else
        echo "Failed to copy file."
    fi
else
    echo "Failed to start container $CONTAINER_NAME."
fi

docker stop $CONTAINER_NAME
docker rm $CONTAINER_NAME

echo "Now you can install bitsandbytes locally using \"pip install\" with the file in the build_output/ folder"

@@ -1,18 +0,0 @@
VERSION=1.0.0

rm -rf buildenv build dist *.spec

python -m venv buildenv
source buildenv/bin/activate

#pip install -r requirements.txt # more junk
CFLAGS="-g0 -Wl,--strip-all" \
    pip install \
    --no-cache-dir \
    --compile \
    --global-option=build_ext \
    --global-option="-j 4" \
    -r requirements.txt # cleaner (less junk)

pip install pyinstaller
pyinstaller --noconfirm --onefile --console --add-data services:services --name ai-suite-rocm_${VERSION}_linux_x86_64 "main.py"

deactivate
rm -rf buildenv

4 comfyui-rocm/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
venv/
webui/
models/
*.log

52 comfyui-rocm/install.sh Executable file
@@ -0,0 +1,52 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"
NAME="ComfyUI"

# Function to install/update
install() {
    if [ -d "webui" ]; then
        echo "$NAME is already installed. Updating..."
        yes_or_no "Do you want to update $NAME?" && {
            cd webui
            git pull
            echo "$NAME WebUI successfully updated."
        }
    else
        echo "Cloning $NAME repository..."
        git clone https://github.com/comfyanonymous/ComfyUI.git webui

        echo "Running $NAME setup..."
        $python_exec -m pip install -r webui/requirements.txt

        cd webui/custom_nodes

        # Install manager
        git clone https://github.com/ltdrdata/ComfyUI-Manager.git

        # Add GGUF support
        git clone https://github.com/city96/ComfyUI-GGUF
        $python_exec -m pip install --upgrade gguf numpy==1.26.4

        # Add NF4 support
        git clone https://github.com/comfyanonymous/ComfyUI_bitsandbytes_NF4.git
        $python_exec -m pip install --upgrade ../../../bitsandbytes-rocm-build/bitsandbytes-0.43.3.dev0-cp310-cp310-linux_x86_64.whl # install bitsandbytes for rocm until it is available on pypi

        cd ../.. # back to the service root so the symlink below is created next to webui/
        ln -s webui/models models
    fi
}

# Main function
main() {
    prepare_env
    install
    clean
    echo "$NAME installation/update complete. Use ./run.sh to start"
}

# Run main function
main

18 comfyui-rocm/run.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start
    TORCH_BLAS_PREFER_HIPBLASLT=0 $python_exec webui/main.py
}

main

@@ -1,81 +0,0 @@
import json
import os

from core.vars import logger

data = {}
file = os.path.join(os.path.expanduser("~"), ".config", "ai-suite-rocm", "config.json")


def create():
    if not os.path.exists(file):
        os.makedirs(os.path.dirname(file), exist_ok=True)
        with open(file, "w") as f:
            f.write("{}")
        logger.info(f"Created config file at {file}")


def read():
    global data
    with open(file, "r") as f:
        data = json.load(f)


def write():
    global data
    with open(file, "w") as f:
        json.dump(data, f)


def get(key: str, default=None):
    global data
    if key not in data:
        return default
    return data.get(key)


def put(key: str, value):
    global data
    data[key] = value
    write()


def has(key: str):
    global data
    return key in data


def remove(key: str):
    global data
    if key in data:
        data.pop(key)
        write()


def clear():
    global data
    data = {}
    write()


def create_file(filename: str, content: str):
    with open(os.path.join(os.path.dirname(file), filename), "w") as f:
        f.write(content)


def remove_file(filename: str):
    os.remove(os.path.join(os.path.dirname(file), filename))


def file_exists(filename: str):
    return os.path.exists(os.path.join(os.path.dirname(file), filename))


def get_file_path(filename: str):
    return os.path.join(os.path.dirname(file), filename)


def open_file(filename: str, mode: str = 'w'):
    return open(os.path.join(os.path.dirname(file), filename), mode)

@@ -1,76 +0,0 @@
# Taken from https://gitlab.com/jvadair/pyscreen
# All credit goes to the original author, jvadair

__all__ = ['Screen', 'create', 'kill', 'exists', 'ls']

import os
import signal
import subprocess
from datetime import datetime


class ScreenNotFound(Exception):
    pass


class Screen:
    def __init__(self, pid, kill_on_exit=False):
        """
        :param pid: The process ID of the screen
        Creates a new Screen object
        """
        if not exists(pid):
            raise ScreenNotFound(f"No screen with pid {pid}")
        self.pid = pid
        self.kill_on_exit = kill_on_exit

    def __del__(self):  # Destroy screen process when object deleted
        if self.kill_on_exit:
            kill(self.pid)

    def send(self, command: str, end="\r") -> None:
        """
        :param command: The command to be run
        :param end: Appended to the end of the command - the default value is a carriage return
        """
        os.system(f'screen -S {self.pid} -X stuff {command}{end}')


def create(name, shell=os.environ['SHELL'], logfile=None, title=None, kill_on_exit=False) -> Screen:
    command = ["screen", "-DmS", name, '-s', shell]
    if logfile:
        command.append('-Logfile')
        command.append(logfile)
    if title:
        command.append('-t')
        command.append(title)
    process = subprocess.Popen(command)
    while not process.pid:
        pass
    return Screen(process.pid, kill_on_exit)


def kill(pid):
    os.kill(pid, signal.SIGTERM)


def exists(pid: int) -> bool:
    command = f"screen -S {str(pid)} -Q select .".split()
    pop = subprocess.Popen(command, stdout=subprocess.DEVNULL)
    pop.wait()
    code = pop.returncode
    return False if code else True


def ls() -> dict:
    out = subprocess.check_output(["screen", "-ls"]).decode()
    out = out.replace('\t', '')
    out = out.split('\n')
    out = out[1:len(out) - 2]
    out = [i.replace(")", "").split("(") for i in out]
    final = {}
    for item in out:
        process = item[0].split('.')
        pid = process[0]
        final[pid] = {'name': process[1]}  # final: {pid:{...}, ... }
        final[pid]['time'] = datetime.strptime(item[1], '%m/%d/%Y %X %p')
    return final

@@ -1,322 +0,0 @@
import os
import shutil
import subprocess
from pathlib import Path
from typing import Union, List, Optional

import psutil

from core import utils, screen
from core.screen import Screen
from core.vars import logger, PYTHON_EXEC
from ui import choices


class Stack:
    def __init__(self, name: str, id: str, port: int, url: str):
        """
        Initialize the Stack instance.
        :param name: The name of the stack
        :param id: A unique identifier for the stack
        :param port: The port the stack service uses
        :param url: The URL associated with the stack
        """
        self.name = name
        self.id = id
        self.path = Path.home() / ".ai-suite-rocm" / id
        self.url = url
        self.port = port

        self.pid_file = self.path / ".pid"
        self.pid = self.read_pid()
        self.screen_session: Screen = None

    def read_pid(self) -> Optional[int]:
        """
        Read the PID from the PID file, if it exists.
        :return: The PID as an integer, or None if the file does not exist or an error occurs.
        """
        if self.pid_file.exists():
            return int(self.pid_file.read_text())
        else:
            return None

    def write_pid(self, pid: int) -> None:
        """
        Write the PID to the PID file.
        :param pid: Process ID to write
        """
        with self.pid_file.open('w') as f:
            f.write(str(pid))

    def remove_pid_file(self) -> None:
        """Remove the PID file if it exists."""
        if self.pid_file.exists():
            self.pid_file.unlink()

    def install(self) -> None:
        """Install the stack, creating the virtual environment and performing initial setup."""
        if self.is_installed():
            self.update()
        else:
            self.check_for_broken_install()
            self.create_dir('')
            self.create_venv()
            self._install()
            self.create_file('.installed', 'true')
            logger.info(f"Installed {self.name}")

    def _install(self) -> None:
        """Additional installation steps specific to the stack (override as needed)."""
        pass

    def is_installed(self) -> bool:
        """
        Check if the stack is installed by verifying the existence of the '.installed' file.
        :return: True if the stack is installed, False otherwise.
        """
        return self.file_exists('.installed')

    def check_for_broken_install(self) -> None:
        """Check for a broken installation and clean up any leftover files."""
        if not self.is_installed() and self.path.exists():
            logger.warning("Found files from a previous/broken/crashed installation, cleaning up...")
            self.remove_dir('')

    def update(self, folder: str = 'webui') -> None:
        """Update the stack by pulling the latest changes from the repository."""
        if self.is_installed():
            was_running = self.status()
            if was_running:
                self.stop()

            logger.info(f"Updating {self.name}")
            symlinks = utils.find_symlink_in_folder(self.path)
            self.git_pull(self.path / folder)
            self._update()
            utils.create_symlinks(symlinks)

            if was_running:
                self.start()
        else:
            logger.warning(f"Could not update {self.name} as {self.name} is not installed")

    def _update(self) -> None:
        """Additional update steps specific to the stack (override as needed)."""
        pass

    def uninstall(self) -> None:
        """Uninstall the stack by stopping it and removing its files."""
        logger.info(f"Uninstalling {self.name}")
        if self.status():
            self.stop()
        self.bash(f"rm -rf {self.path}")
        self.remove_pid_file()

    def start(self) -> None:
        """Start the stack service."""
        if self.status():
            logger.warning(f"{self.name} is already running")
            return

        if self.is_installed():
            self._start()
        else:
            logger.error(f"{self.name} is not installed")

    def _start(self) -> None:
        """Additional start steps specific to the stack (override as needed)."""
        pass

    def stop(self) -> None:
        """Stop the stack service by terminating the associated process."""
        if self.status():
            logger.debug(f"Stopping {self.name} with PID: {self.pid}")
            try:
                proc = psutil.Process(self.pid)
                proc.terminate()  # Graceful shutdown
                proc.wait(timeout=5)
            except (psutil.NoSuchProcess, psutil.TimeoutExpired):
                logger.warning(f"{self.name} did not terminate gracefully, forcing kill")
                try:
                    psutil.Process(self.pid).kill()
                except psutil.NoSuchProcess:
                    pass
            self.remove_pid_file()
        else:
            logger.warning(f"{self.name} is not running")

    def restart(self) -> None:
        """Restart the stack service."""
        self.stop()
        self.start()

    def status(self) -> bool:
        """
        Check if the stack service is running.
        :return: True if the service is running, False otherwise.
        """
        if self.screen_session and self.pid:
            return screen.exists(self.screen_session.pid)
        else:
            return self.pid is not None and psutil.pid_exists(self.pid)

    def create_venv(self) -> None:
        """Create a Python virtual environment for the stack."""
        venv_path = self.path / 'venv'
        if not self.has_venv():
            logger.info(f"Creating venv for {self.name}")
            self.bash(f"{PYTHON_EXEC} -m venv {venv_path} --system-site-packages")
            self.pip_install("pip")
        else:
            logger.debug(f"Venv already exists for {self.name}")

    def has_venv(self) -> bool:
        """
        Check if the virtual environment exists.
        :return: True if the virtual environment exists, False otherwise.
        """
        return (self.path / 'venv').exists()

    def pip_install(self, package: Union[str, List[str]], no_deps: bool = False, env: List[str] = [],
                    args: List[str] = []) -> None:
        """Install a Python package or list of packages using pip."""
        if no_deps:
            args.append("--no-deps")

        if isinstance(package, list):
            for p in package:
                logger.info(f"Installing {p}")
                self.pip(f"install -U {p}", env=env, args=args)
        else:
            logger.info(f"Installing {package}")
            self.pip(f"install -U {package}", env=env, args=args)

    def install_requirements(self, filename: str = 'requirements.txt', env: List[str] = []) -> None:
        """Install requirements from a given file."""
        logger.info(f"Installing requirements for {self.name} ({filename})")
        self.pip(f"install -r {filename}", env=env)

    def pip(self, cmd: str, env: List[str] = [], args: List[str] = [], current_dir: Optional[Path] = None) -> None:
        """Run pip with a given command."""
        self.python(f"-m pip {cmd}", env=env, args=args, current_dir=current_dir)

    def python(self, cmd: str, env: List[str] = [], args: List[str] = [], current_dir: Optional[Path] = None,
               daemon: bool = False) -> None:
        """Run a Python command inside the stack's virtual environment."""
        self.bash(f"{' '.join(env)} {self.path / 'venv' / 'bin' / 'python'} {cmd} {' '.join(args)}", current_dir,
                  daemon)

    def bash(self, cmd: str, current_dir: Optional[Path] = None, daemon: bool = False) -> None:
        """Run a bash command, optionally as a daemon."""
        full_cmd = f"cd {current_dir or self.path} && {cmd}"

        if daemon:
            if self.status():
                choice = choices.already_running.ask()
                if choice is True:
                    self.stop()
                    self._start()
                    return
                else:
                    return
            else:
                logger.debug(f"Running command as daemon: {full_cmd}")
                # process = subprocess.Popen(full_cmd, shell=True, preexec_fn=os.setpgrp,
                #                            stdout=config.open_file(f"{self.id}-stdout"),
                #                            stderr=config.open_file(f"{self.id}-stderr"))
                self.screen_session = screen.create(name=self.id, kill_on_exit=True)
                self.screen_session.send(f"'{full_cmd} && screen -wipe'")
                self.write_pid(self.screen_session.pid)
                return
        else:
            logger.debug(f"Running command: {full_cmd}")
            process = subprocess.Popen(full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = process.communicate()

            if process.returncode != 0:
                logger.fatal(f"Failed to run command: {full_cmd}")
                logger.fatal(f"Error: {err.decode('utf-8')}")
                logger.fatal(f"Output: {out.decode('utf-8')}")
                raise Exception(f"Failed to run command: {full_cmd}")

    def git_clone(self, url: str, branch: Optional[str] = None, dest: Optional[Path] = None) -> None:
        """Clone a git repository."""
        logger.info(f"Cloning {url}")
        self.bash(f"git clone {f'-b {branch}' if branch else ''} {url} {dest or ''}")

    def git_pull(self, repo_folder: Path, force: bool = False) -> None:
        """Pull changes from a git repository."""
        self.bash(f"git reset --hard HEAD {'&& git clean -f -d' if force else ''} && git pull", repo_folder)

    def install_from_prebuilt(self, name):
        for prebuilt in utils.get_prebuilts():
            if prebuilt['name'].split("-")[0] == name:
                self.pip(f"install {prebuilt['browser_download_url']}")
                return

    def create_file(self, name: str, content: str) -> None:
        """Create a file with the given content."""
        (self.path / name).write_text(content)

    def create_dir(self, name: str) -> None:
        """Create a directory."""
        dir_path = self.path / name
        logger.debug(f"Creating directory {dir_path}")
        dir_path.mkdir(parents=True, exist_ok=True)

    def remove_file(self, name: str) -> None:
        """Remove a file."""
        logger.debug(f"Removing file {name}")
        os.remove(os.path.join(self.path, name))

    def remove_dir(self, name: str) -> None:
        """Remove a directory."""
        logger.debug(f"Removing directory {name or self.path}")
        if not name:
            shutil.rmtree(self.path)
        else:
            shutil.rmtree(os.path.join(self.path, name))

    def move_file_or_dir(self, src: str, dest: str) -> None:
        """Move a file or directory."""
        logger.debug(f"Moving file/dir {src} to {dest}")
        os.rename(os.path.join(self.path, src), os.path.join(self.path, dest))

    def move_all_files_in_dir(self, src: str, dest: str) -> None:
        """Move all files in a directory to another directory."""
        logger.debug(f"Moving all files in directory {src} to {dest}")
        for file in os.listdir(os.path.join(self.path, src)):
            os.rename(os.path.join(self.path, src, file), os.path.join(self.path, dest, file))

    def file_exists(self, name: str) -> bool:
        """Check if a file exists."""
        return (self.path / name).exists()

    def dir_exists(self, name: str) -> bool:
        """Check if a directory exists."""
        return (self.path / name).exists()

    def remove_line_in_file(self, contains: str | list, file: str):
        """Remove lines containing a specific string from a file."""
        target_file = self.path / file
        logger.debug(f"Removing lines containing {contains} in {target_file}")

        if isinstance(contains, list):
            for c in contains:
                self.bash(f"sed -i '/{c}/d' {target_file}")
        else:
            self.bash(f"sed -i '/{contains}/d' {target_file}")

    def replace_line_in_file(self, match: str, replace: str, file: str):
        """Replace lines containing a specific string in a file."""
        target_file = self.path / file
        logger.debug(f"Replacing lines containing {match} with {replace} in {target_file}")
        self.bash(f"sed -i 's/{match}/{replace}/g' {target_file}")

@@ -1,145 +0,0 @@
import importlib
import json
import os
import shutil
import subprocess
from pathlib import Path
from typing import List, Dict, Tuple, Union
from urllib import request, error

from core.stack import Stack
from core.vars import ROCM_VERSION, logger


def get_prebuilts(repo_owner: str = "M4TH1EU", repo_name: str = "ai-suite-rocm-local",
                  release_tag: str = f"prebuilt-whl-{ROCM_VERSION}") -> List[dict]:
    """
    Fetch prebuilt assets from a GitHub release using the GitHub API.
    :param repo_owner: GitHub repository owner
    :param repo_name: GitHub repository name
    :param release_tag: Release tag for fetching assets
    :return: List of assets (dictionaries) from the GitHub release
    """
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/tags/{release_tag}"
    try:
        with request.urlopen(api_url) as response:
            if response.status != 200:
                logger.error(f"Failed to fetch data: HTTP Status {response.status}")
                return []

            release_data = json.load(response)
            assets = release_data.get('assets', [])
            if not assets:
                logger.error("No assets found in release data")
                return []

            return assets
    except error.URLError as e:
        logger.error(f"Error fetching release data: {e}")
        return []


def check_for_build_essentials() -> None:
    """
    Check if build essentials like `build-essential` and `python3.10-dev` are installed.
    Raises a warning if they are missing.
    """
    logger.debug("Checking for build essentials...")
    debian = Path('/etc/debian_version').exists()
    fedora = Path('/etc/fedora-release').exists()

    if debian:
        check_gcc = run_command("dpkg -l | grep build-essential &>/dev/null", exit_on_error=False)[2] == 0
        check_python = run_command("dpkg -l | grep python3.10-dev &>/dev/null", exit_on_error=False)[2] == 0

        if not check_gcc or not check_python:
            raise UserWarning(
                "The packages build-essential and python3.10-dev are required for this script to run. Please install them. See the README for more information."
            )
    elif fedora:
        check_gcc = run_command("rpm -q gcc &>/dev/null", exit_on_error=False)[2] == 0
        check_python = run_command("rpm -q python3.10-devel &>/dev/null", exit_on_error=False)[2] == 0

        if not check_gcc or not check_python:
            raise UserWarning(
                "The package python3.10-devel and the Development Tools group are required for this script to run. Please install them. See the README for more information."
            )
    else:
        logger.warning(
            "Unsupported OS detected. Please ensure you have the following packages installed or their equivalent: build-essential, python3.10-dev"
        )


def run_command(command: str, exit_on_error: bool = True) -> Tuple[bytes, bytes, int]:
    """
    Run a shell command and return the output, error, and return code.
    :param command: The shell command to run
    :param exit_on_error: Whether to raise an exception on error
    :return: A tuple containing stdout, stderr, and return code
    """
    logger.debug(f"Running command: {command}")
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()

    if process.returncode != 0:
        logger.fatal(f"Failed to run command: {command}")
        if exit_on_error:
            raise Exception(f"Failed to run command: {command}")

    return out, err, process.returncode


def load_service_from_string(service: str) -> Stack:
    """
    Dynamically load a Stack class based on the service string.
    :param service: Name of the service to load
    :return: An instance of the corresponding Stack class
    """
    logger.debug(f"Loading service from string: {service}")
    service_name = service.replace("_", " ").title().replace(" ", "")
    module = importlib.import_module(f"services.{service}")
    stack_class = getattr(module, service_name)
    return stack_class()


def find_symlink_in_folder(folder: Union[str, Path]) -> Dict[Path, Path]:
    """
    Find all symbolic links in the given folder and map them to their resolved paths.
    :param folder: The folder to search for symlinks
    :return: A dictionary mapping symlink paths to their resolved target paths
    """
    folder = Path(folder)
    symlinks = {}
    for file in folder.rglob("webui/**"):
        if file.is_symlink():
            symlinks[file] = file.resolve()
    return symlinks


def create_symlinks(symlinks: Dict[Path, Path]) -> None:
    """
    Recreate symlinks from a dictionary mapping symlink paths to their resolved targets.
    :param symlinks: Dictionary of symlinks and their resolved paths
    """
    for link, target in symlinks.items():  # keys are the symlink locations, values are their targets
        logger.debug(f"(re)Creating symlink: {link} -> {target}")

        if link.is_symlink():
            link.unlink()
        if link.exists() and link.is_dir():
            shutil.rmtree(link)

        os.symlink(target, link)

@@ -1,19 +0,0 @@
import logging
import os

PYTHON_EXEC = 'python3.10'
PATH = os.path.dirname(os.path.abspath(__file__))
ROCM_VERSION = "6.2"

logger = logging.getLogger('ai-suite-rocm')

services = [
    'background_removal_dis',
    'comfy_ui',
    'stable_diffusion_webui',
    'stable_diffusion_forge',
    'text_generation_webui',
    'xtts_webui'
]

loaded_services = {}

@@ -0,0 +1 @@
build_output/

@@ -0,0 +1,39 @@
FROM rocm/dev-ubuntu-22.04:6.1.2
ENV TORCH_VERSION="rocm6.1" \
DEBIAN_FRONTEND=noninteractive \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
# for llama
CMAKE_ARGS="-DGGML_HIPBLAS=on" \
FORCE_CMAKE=1
WORKDIR /tmp
RUN apt-get update -y
RUN apt-get install -y wget git cron cmake make software-properties-common
# Install python3.10
RUN add-apt-repository ppa:deadsnakes/ppa -y && apt-get update -y
RUN apt-get install -y python3.10 python3.10-dev python3.10-venv
ENV VIRTUAL_ENV=/opt/venv
RUN python3.10 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN pip3 install --upgrade pip wheel setuptools build
# Install pytorch for rocm
RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/${TORCH_VERSION}
# ROCM llama-cpp-python
RUN apt-get install -y hipblas hipblaslt hiprand hipsparse hipcub rocthrust-dev
## Clone repo and install python requirements
RUN git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git
WORKDIR /tmp/llama-cpp-python
## Build
RUN python3.10 -m build --wheel
# Cleanup
RUN apt-get clean && pip3 cache purge

@@ -0,0 +1,16 @@
# llama-cpp-python-rocm-build-pip
This is a simple script to help you build the latest llama-cpp-python for ROCm.
The official build process requires a lot of ROCm-related packages and can easily mess up your computer if you install the wrong packages.
This creates a Docker image that builds the package and extracts the built wheel file, which you can then easily install using pip.
```bash
./build.sh
./extract_build.sh
```
The wheel file will be in a folder named ``build_output/``
*You might also find one of my already-built wheels in this folder or on the GitHub releases page (it may not be up to date).*
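
Likewise, a minimal sketch of installing and sanity-checking the result (the exact wheel filename varies by version):

```bash
# Install the extracted llama-cpp-python wheel, then check that it imports
pip install build_output/dist/llama_cpp_python-*.whl
python -c "import llama_cpp; print(llama_cpp.__version__)"
```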

@@ -0,0 +1 @@
docker build . -t 'llama-cpp-python-rocm-build:6.1.2' -f Dockerfile

@@ -0,0 +1,31 @@
#!/bin/bash

# Set variables
IMAGE_NAME="llama-cpp-python-rocm-build:6.1.2"
CONTAINER_NAME="llama-cpp-python-rocm-build"
FILE_IN_CONTAINER="/tmp/llama-cpp-python/dist/"
FILE_ON_HOST="./build_output/"

# Run the Docker container
docker run -d --name $CONTAINER_NAME $IMAGE_NAME

# Check if the container is running
if [ "$(docker ps -q -f name=$CONTAINER_NAME)" ]; then
    echo "Container $CONTAINER_NAME is running."

    # Copy the file from the container to the host
    docker cp $CONTAINER_NAME:$FILE_IN_CONTAINER $FILE_ON_HOST
    if [ $? -eq 0 ]; then
        echo "File copied successfully to $FILE_ON_HOST"
    else
        echo "Failed to copy file."
    fi
else
    echo "Failed to start container $CONTAINER_NAME."
fi

docker stop $CONTAINER_NAME
docker rm $CONTAINER_NAME

echo "Now you can install llama-cpp-python locally using \"pip install\" with the file in the build_output/ folder"

40 main.py
@@ -1,40 +0,0 @@
import logging
import sys

from core import config
from core.utils import check_for_build_essentials, load_service_from_string
from core.vars import logger, services, loaded_services
from ui.choices import update_choices
from ui.interface import run_interactive_cmd_ui


def setup_logger(level: int = logging.INFO):
    if not logger.hasHandlers():
        logger.setLevel(level)
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter('[%(levelname)s] : %(message)s'))
        logger.addHandler(handler)


def setup_config():
    config.create()
    config.read()


def load_services():
    for service in services:
        loaded_services[service] = load_service_from_string(service)


if __name__ == '__main__':
    setup_logger(logging.DEBUG)
    logger.info("Starting AI Suite for ROCM")

    setup_config()
    check_for_build_essentials()

    load_services()

    update_choices()
    run_interactive_cmd_ui()

@@ -1,57 +0,0 @@
ARG ROCM_VERSION=6.2
FROM rocm/dev-ubuntu-22.04:${ROCM_VERSION}
# also works without -complete for specific arch (tested on gfx1030)

# Declared here so the workflow's --build-arg TORCH_VERSION=... actually reaches the pip install below
ARG TORCH_VERSION=rocm6.2

ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8

# For bitsandbytes
ENV BNB_GPU_TARGETS="gfx803;gfx900;gfx906;gfx908;gfx90a;gfx1010;gfx1030;gfx1100;gfx1101;gfx1102"

# For LLAMA
# Syntax error but works
ARG ROCM_DOCKER_ARCH=\
gfx803 \
gfx900 \
gfx906 \
gfx908 \
gfx90a \
gfx1010 \
gfx1030 \
gfx1100 \
gfx1101 \
gfx1102

# no syntax error but doesn't work for some reason (error on llama compile)??
# CMake Error: The source directory "/tmp/llama-cpp-python/gfx1102" does not exist.
# ARG ROCM_DOCKER_ARCH="gfx803 gfx 900 gfx906 gfx908 gfx90a gfx1010 gfx1030 gfx1100 gfx1101 gfx1102"
ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}

ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++

WORKDIR /tmp

RUN apt-get update -y
RUN apt-get install -y wget git cron cmake make software-properties-common

# Install python3.10
RUN add-apt-repository ppa:deadsnakes/ppa -y && apt-get update -y
RUN apt-get install -y python3.10 python3.10-dev python3.10-venv

ENV VIRTUAL_ENV=/opt/venv
RUN python3.10 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

RUN pip3 install --upgrade pip wheel setuptools build

# Install pytorch for rocm
RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/${TORCH_VERSION}

# Install deps
RUN apt-get install -y hipblas hipblaslt hiprand hipsparse hipcub rocthrust-dev

COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

# Cleanup
RUN apt-get clean && pip3 cache purge

@@ -1 +0,0 @@
docker build . -t 'prebuilts-rocm:6.2' -f Dockerfile

@@ -1,18 +0,0 @@
#!/bin/sh -l
# ROCM bitsandbytes
## Clone repo and install python requirements
git clone --depth 1 -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git /tmp/bitsandbytes
cd /tmp/bitsandbytes
pip3 install -r requirements-dev.txt
## Build
cmake -DCOMPUTE_BACKEND=hip -S . -DBNB_ROCM_ARCH=${BNB_GPU_TARGETS}
make
python3.10 setup.py bdist_wheel --universal
# ROCM llama-cpp-python
## Clone repo and install python requirements
git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git /tmp/llama-cpp-python
cd /tmp/llama-cpp-python
CMAKE_ARGS="-D GGML_HIPBLAS=on -D AMDGPU_TARGETS=${GPU_TARGETS}" FORCE_CMAKE=1 python3.10 -m build --wheel

@@ -1,27 +0,0 @@
#!/bin/bash

# Set variables
IMAGE_NAME="prebuilts-rocm:6.2"
CONTAINER_NAME="prebuilts-rocm"
FILES_TO_COPY=("/tmp/bitsandbytes/dist/" "/tmp/llama-cpp-python/dist/")  # bash array, not bracket syntax
WHERE_TO_PASTE="./build_output/"

# Run the Docker container
docker run -d --name $CONTAINER_NAME $IMAGE_NAME

# Check if the container is running
if [ "$(docker ps -q -f name=$CONTAINER_NAME)" ]; then
    echo "Container $CONTAINER_NAME is running."

    # Copy the files from the container to the host
    for file in "${FILES_TO_COPY[@]}"; do
        docker cp $CONTAINER_NAME:$file $WHERE_TO_PASTE
    done
    echo "Files copied to $WHERE_TO_PASTE."
else
    echo "Failed to start container $CONTAINER_NAME."
fi

docker stop $CONTAINER_NAME
docker rm $CONTAINER_NAME

@@ -1,2 +0,0 @@
questionary~=2.0.1
psutil~=6.0.0

@@ -1,33 +0,0 @@
from pathlib import Path

from core.stack import Stack


class BackgroundRemovalDis(Stack):
    def __init__(self):
        super().__init__(
            'Background Removal (DIS)',
            'background_removal_dis',
            5005,
            'https://huggingface.co/spaces/ECCV2022/dis-background-removal'
        )

    def _install(self):
        self.git_clone(url=self.url, dest=Path(self.path / "webui"))
        self.install_requirements("webui/requirements.txt")
        self.pip_install("gradio")  # gradio is not in requirements.txt for some reason

        self.remove_line_in_file("os.", "webui/app.py")  # remove manual clone of DIS from app.py (done below)
        self.git_clone("https://github.com/xuebinqin/DIS.git", dest=Path("tmp-dis"))
        self.move_all_files_in_dir("tmp-dis/IS-Net", "webui")
        self.remove_dir("tmp-dis")

        self.create_dir("webui/saved_models")
        self.move_file_or_dir("webui/isnet.pth", "webui/saved_models/isnet.pth")

        # self.remove_dir("webui/.git")  # saves a lot of space due to big repo

    def _start(self):
        self.python(f"app.py", current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0", f"GRADIO_SERVER_PORT={self.port}"], daemon=True)

@@ -1,35 +0,0 @@
from pathlib import Path

from core.stack import Stack


class ComfyUi(Stack):
    def __init__(self):
        super().__init__(
            'ComfyUI',
            'comfy_ui',
            5004,
            'https://github.com/comfyanonymous/ComfyUI.git'
        )

    def _install(self):
        # Install the webui
        self.git_clone(url=self.url, dest=Path(self.path / "webui"))
        self.install_requirements("webui/requirements.txt")

        # Install the manager
        self.git_clone(url="https://github.com/ltdrdata/ComfyUI-Manager.git", dest=Path("webui/custom_nodes/manager"))

        # Add GGUF support
        self.git_clone(url="https://github.com/city96/ComfyUI-GGUF.git", dest=Path("webui/custom_nodes/ComfyUI-GGUF"))
        self.pip_install(["gguf", "numpy==1.26.4"])

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")
        self.git_clone(url="https://github.com/comfyanonymous/ComfyUI_bitsandbytes_NF4.git",
                       dest=Path("webui/custom_nodes/ComfyUI_bitsandbytes_NF4"))

    def _start(self):
        args = ["--port", str(self.port), "--force-fp32"]
        self.python(f"main.py", args=args, current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"], daemon=True)

@@ -1,27 +0,0 @@
from pathlib import Path

from core.stack import Stack


class StableDiffusionForge(Stack):
    def __init__(self):
        super().__init__(
            'StableDiffusion Forge WebUI',
            'stable_diffusion_forge',
            5003,
            'https://github.com/lllyasviel/stable-diffusion-webui-forge'
        )

    def _install(self):
        # Install the webui
        self.git_clone(url=self.url, dest=Path(self.path / "webui"))
        self.python("launch.py --skip-torch-cuda-test --exit", current_dir=Path(self.path / "webui"))

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")

    def _start(self):
        args = ["--listen", "--enable-insecure-extension-access", "--port", str(self.port)]
        self.python(f"launch.py", args=args, current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"], daemon=True)

@@ -1,27 +0,0 @@
from pathlib import Path

from core.stack import Stack


class StableDiffusionWebui(Stack):
    def __init__(self):
        super().__init__(
            'StableDiffusion WebUI',
            'stable_diffusion_webui',
            5002,
            'https://github.com/AUTOMATIC1111/stable-diffusion-webui'
        )

    def _install(self):
        # Install the webui
        self.git_clone(url=self.url, branch="dev", dest=Path(self.path / "webui"))
        self.python("launch.py --skip-torch-cuda-test --exit", current_dir=Path(self.path / "webui"))

        # Add NF4 support for Flux
        self.install_from_prebuilt("bitsandbytes")

    def _start(self):
        args = ["--listen", "--enable-insecure-extension-access", "--port", str(self.port)]
        self.python(f"launch.py", args=args, current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"], daemon=True)

@@ -1,65 +0,0 @@
from pathlib import Path

from core.stack import Stack


class TextGenerationWebui(Stack):
    def __init__(self):
        super().__init__(
            'Text Generation',
            'text_generation_webui',
            5000,
            'https://github.com/oobabooga/text-generation-webui/'
        )

        self.exllama = "0.2.1"

    def _install(self):
        # Install LlamaCpp from prebuilt
        self.pip_install("llama-cpp-python", env=["CMAKE_ARGS=\"-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS\""])  # cpu

        # Install LlamaCpp for ROCM from source
        # self.pip_install("llama-cpp-python", env=["CMAKE_ARGS=\"-DGGML_HIPBLAS=on\" FORCE_CMAKE=1"])  # manual gpu (only works if whole rocm suite installed)
        # self.install_from_prebuilt("llama_cpp_python")  # gpu (only works if whole rocm suite installed)

        # Install Triton for ROCM from prebuilt
        # self.install_from_prebuilt("triton")
        # Install Triton for ROCM from source
        # self.git_clone(url="https://github.com/ROCmSoftwarePlatform/triton.git")
        # self.pip_install(['ninja', 'cmake'])
        # self.pip("install -e .", path="triton")

        # Install the webui
        self.git_clone(url=self.url, dest=Path(self.path / "webui"))
        self.remove_line_in_file(["accelerate", "lm_eval", "optimum", "autoawq", "llama_cpp_python"],
                                 "webui/requirements_amd.txt")
        self.install_requirements("webui/requirements_amd.txt")
        self.pip_install(["accelerate", "optimum"])
        self.pip_install(
            "https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.7/autoawq_kernels-0.0.7+rocm571-cp310-cp310-linux_x86_64.whl",
            no_deps=True)
        self.pip_install(
            "https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.6/autoawq-0.2.6-cp310-cp310-linux_x86_64.whl",
            no_deps=True)

        # Fix llama trying to use cuda version
        self.remove_line_in_file("llama_cpp_cuda", "webui/modules/llama_cpp_python_hijack.py")

        # Install ExLlamaV2 and auto-gptq
        self.pip_install(
            f"https://github.com/turboderp/exllamav2/releases/download/v{self.exllama}/exllamav2-{self.exllama}+rocm6.1.torch2.4.0-cp310-cp310-linux_x86_64.whl")
        self.install_from_prebuilt("bitsandbytes")
        self.pip_install("auto-gptq", args=["--no-build-isolation", "--extra-index-url",
                                            "https://huggingface.github.io/autogptq-index/whl/rocm573/"])

    def _update(self):
        self.pip_install(
            f"https://github.com/turboderp/exllamav2/releases/download/v{self.exllama}/exllamav2-{self.exllama}+rocm6.1.torch2.4.0-cp310-cp310-linux_x86_64.whl")
        self.pip_install("auto-gptq", args=["--no-build-isolation", "--extra-index-url",
                                            "https://huggingface.github.io/autogptq-index/whl/rocm573/"])

    def _start(self):
        args = ["--listen", "--listen-port", str(self.port)]
        self.python(f"server.py", args=args, current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"], daemon=True)

@@ -1,38 +0,0 @@
from pathlib import Path

from core.stack import Stack


class XttsWebui(Stack):
    def __init__(self):
        super().__init__(
            'XTTS WebUI',
            'xtts_webui',
            5001,
            'https://github.com/daswer123/xtts-webui'
        )

    def _install(self):
        # Install the webui
        self.git_clone(url=self.url, dest=Path(self.path / "webui"))

        self.remove_line_in_file("torch", "webui/requirements.txt")
        self.install_requirements("webui/requirements.txt")

        # sed -i 's/device = "cuda" if torch.cuda.is_available() else "cpu"/device = "cpu"/' webui/scripts/utils/formatter.py
        # sed -i 's/asr_model = WhisperModel(whisper_model, device=device, compute_type="float16")/asr_model = WhisperModel(whisper_model, device=device, compute_type="int8")/' webui/scripts/utils/formatter.py

        # Disable gpu for faster-whisper as ROCM isn't supported yet
        self.replace_line_in_file("device = \"cuda\" if torch.cuda.is_available() else \"cpu\"", "device = \"cpu\"",
                                  "webui/scripts/utils/formatter.py")
        self.replace_line_in_file("asr_model = WhisperModel(whisper_model, device=device, compute_type=\"float16\")",
                                  "asr_model = WhisperModel(whisper_model, device=device, compute_type=\"int8\")",
                                  "webui/scripts/utils/formatter.py")

        # Deepspeed and ninja (not working yet)
        # self.pip_install(["ninja", "deepspeed"])

    def _start(self):
        args = ["--host", "0.0.0.0", "--port", str(self.port)]
        self.python(f"server.py", current_dir=Path(self.path / "webui"),
                    env=["TORCH_BLAS_PREFER_HIPBLASLT=0"], args=args, daemon=True)

4 stablediffusion-forge-rocm/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
venv/
webui/
models/
outputs/

@@ -0,0 +1,43 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Function to install/update StableDiffusionForge
install_stablediffusionforge() {
    if [ -d "webui" ]; then
        echo "StableDiffusionForge repository already exists."
        yes_or_no "Do you want to update StableDiffusionForge WebUI?" && {
            cd webui
            git pull
            echo "StableDiffusionForge WebUI successfully updated."
        }
    else
        echo "Cloning StableDiffusionForge repository..."
        git clone https://github.com/lllyasviel/stable-diffusion-webui-forge webui

        echo "Running StableDiffusionForge setup..."
        $python_exec webui/launch.py --skip-torch-cuda-test --exit

        echo "Adding Flux NF4 support for ROCM"
        $python_exec -m pip install --upgrade ../bitsandbytes-rocm-build/bitsandbytes-0.43.3.dev0-cp310-cp310-linux_x86_64.whl # install bitsandbytes for rocm until it is available on pypi

        ln -s webui/models models
        ln -s webui/outputs outputs
    fi
}

# Main function
main() {
    prepare_env

    # Install StableDiffusionForge
    install_stablediffusionforge

    clean
    echo "StableDiffusion installation/update complete. Use ./run.sh to start"
}

# Run main function
main

@@ -0,0 +1,18 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start SD
    TORCH_BLAS_PREFER_HIPBLASLT=0 $python_exec webui/launch.py --listen --enable-insecure-extension-access # --opt-split-attention
}

main

4 stablediffusion-rocm/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
venv/
webui/
models/
outputs/

40 stablediffusion-rocm/install.sh Executable file
@@ -0,0 +1,40 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Function to install/update StableDiffusion
install_stablediffusion() {
    if [ -d "webui" ]; then
        echo "StableDiffusion repository already exists."
        yes_or_no "Do you want to update StableDiffusion WebUI (dev branch)?" && {
            cd webui
            git pull
            echo "StableDiffusion WebUI successfully updated."
        }
    else
        echo "Cloning StableDiffusion repository..."
        git clone -b dev https://github.com/AUTOMATIC1111/stable-diffusion-webui webui

        echo "Running StableDiffusion setup..."
        $python_exec webui/launch.py --skip-torch-cuda-test --exit

        ln -s webui/models models
        ln -s webui/outputs outputs
    fi
}

# Main function
main() {
    prepare_env

    # Install StableDiffusion
    install_stablediffusion

    clean
    echo "StableDiffusion installation/update complete. Use ./run.sh to start"
}

# Run main function
main

18 stablediffusion-rocm/run.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start SD
    TORCH_BLAS_PREFER_HIPBLASLT=0 $python_exec webui/launch.py --listen --enable-insecure-extension-access --opt-split-attention
}

main

4 text-generation-rocm/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
venv/
webui/
models/
outputs/

90 text-generation-rocm/install.sh Executable file
@@ -0,0 +1,90 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"
NAME="TextGeneration"

# Function to install/update
install() {
    if [ -d "webui" ]; then
        echo "$NAME is already installed. Updating..."
        yes_or_no "Do you want to update $NAME?" && {
            cd webui
            git pull
            echo "$NAME WebUI successfully updated."
        }
    else
        # Add BnB
        $python_exec -m pip install --upgrade https://github.com/M4TH1EU/ai-suite-rocm-local/releases/download/prebuilt-wheels-for-rocm/bitsandbytes-0.43.3-cp310-cp310-linux_x86_64.whl # install bitsandbytes for rocm until it is available on pypi

        # Add AutoGPTQ
        $python_exec -m pip install auto-gptq --no-build-isolation --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm573/

        # Add ExLlamav2
        $python_exec -m pip install https://github.com/turboderp/exllamav2/releases/download/v0.1.9/exllamav2-0.1.9+rocm6.1.torch2.4.0-cp310-cp310-linux_x86_64.whl

        # Add LlamaCPP
        CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python # cpu
        # CMAKE_ARGS="-DGGML_HIPBLAS=on" FORCE_CMAKE=1 $python_exec -m pip install llama-cpp-python # gpu

        # llama cpp built with hipblas doesn't work unless the whole rocm stack is installed locally
        # so for now, use llama with openblas (cpu)
        # main_venv_path=$(dirname $(python -c "import torch; print(torch.__file__)"))"/lib/"
        # llama_lib_path="$(pwd)/venv/lib64/python3.10/site-packages/llama_cpp/lib"
        #
        # for file in "$main_venv_path"/*.so; do
        #     ln -s "$file" "$llama_lib_path/$(basename "$file")"
        # done
        # ln -s "$llama_lib_path/libhipblas.so" "$llama_lib_path/libhipblas.so.1"
        # ln -s "$llama_lib_path/libhipblas.so" "$llama_lib_path/libhipblas.so.2"
        # ln -s "$llama_lib_path/librocblas.so" "$llama_lib_path/librocblas.so.3"
        # ln -s "$llama_lib_path/librocblas.so" "$llama_lib_path/librocblas.so.4"
        # ln -s "$llama_lib_path/libamdhip64.so" "$llama_lib_path/libamdhip64.so.5"
        # ln -s "$llama_lib_path/libamdhip64.so" "$llama_lib_path/libamdhip64.so.6"

        # Add Triton
        # $python_exec -m pip install https://github.com/M4TH1EU/ai-suite-rocm-local/releases/download/prebuilt-wheels-for-rocm/llama_cpp_python-0.2.89-cp310-cp310-linux_x86_64.whl
        # git clone https://github.com/ROCmSoftwarePlatform/triton.git .tritonrocm
        # cd .tritonrocm/python
        # $python_exec -m pip install ninja cmake; # build time dependencies
        # $python_exec -m pip uninstall triton -y && $python_exec -m pip install -e .
        # cd .. && sudo rm -R .tritonrocm

        echo "Cloning $NAME repository..."
        git clone https://github.com/oobabooga/text-generation-webui.git webui

        echo "Running $NAME setup..."

        # For some reason these want to reinstall torch for nvidia instead of using the download for rocm, so manually install them
        sed -i '/accelerate/d' webui/requirements_amd.txt
        sed -i '/lm_eval/d' webui/requirements_amd.txt
        sed -i '/optimum/d' webui/requirements_amd.txt
        sed -i '/autoawq/d' webui/requirements_amd.txt
        sed -i '/llama_cpp_python/d' webui/requirements_amd.txt
        $python_exec -m pip install -r webui/requirements_amd.txt

        # only works after requirements_amd.txt is installed ??!
        $python_exec -m pip install accelerate optimum
        $python_exec -m pip install https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.7/autoawq_kernels-0.0.7+rocm571-cp310-cp310-linux_x86_64.whl --no-deps
        $python_exec -m pip install https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.6/autoawq-0.2.6-cp310-cp310-linux_x86_64.whl --no-deps
        $python_exec -m pip install lm_eval

        ln -s webui/models models
    fi
}

# Main function
main() {
    prepare_env
    install
    clean
    echo "$NAME installation/update complete. Use ./run.sh to start"
}

# Run main function
main

18 text-generation-rocm/run.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
source ../utils.sh

python_exec="$(pwd)/venv/bin/python3.10"

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start
    cd webui
    TORCH_BLAS_PREFER_HIPBLASLT=0 $python_exec server.py --listen
}

main
main

@@ -1,58 +0,0 @@
import questionary
from questionary import Choice

from core.vars import loaded_services

start = None
start_service = None
stop_service = None
install_service = None
uninstall_service = None
are_you_sure = None
any_key = None
already_running = None


def update_choices():
    global start, start_service, stop_service, install_service, uninstall_service, are_you_sure, any_key, already_running

    start = questionary.select(
        "Choose an option:",
        choices=[
            Choice("Start service"),
            Choice("Stop service"),
            Choice("Install/update service"),
            Choice("Uninstall service"),
            Choice("exit")
        ])

    _services_choices = [Choice(f"{service.name} [{'ON' if service.status() else 'OFF'}]", value=service.id) for service
                         in loaded_services.values()]
    _services_choices.append(Choice("go back", value="back"))

    start_service = questionary.select(
        "Select service to start:",
        choices=_services_choices
    )

    stop_service = questionary.select(
        "Select service to stop:",
        choices=_services_choices
    )

    install_service = questionary.select(
        "Select service to install/update:",
        choices=_services_choices
    )

    uninstall_service = questionary.select(
        "Select service to uninstall:",
        choices=_services_choices
    )

    are_you_sure = questionary.confirm("Are you sure?")
    already_running = questionary.confirm("Service is already running, do you want to restart it?")
    any_key = questionary.text("Press any key to continue")

@@ -1,75 +0,0 @@
import os

import questionary

from core.vars import loaded_services
from ui import choices
from ui.choices import update_choices


def clear_terminal():
    os.system('cls' if os.name == 'nt' else 'clear')


def handle_services(action, service):
    clear_terminal()
    if service == "back":
        update_choices()
        return

    service = loaded_services[service]

    if action == "start":
        questionary.print(f"Starting {service.name} (127.0.0.1:{service.port})...")
        service.start()
    elif action == "stop":
        questionary.print(f"Stopping {service.name}...")
        service.stop()
    elif action == "install":
        confirmation = choices.are_you_sure.ask()
        if confirmation:
            questionary.print(f"Installing {service.name}...")
            service.install()
    elif action == "uninstall":
        confirmation = choices.are_you_sure.ask()
        if confirmation:
            type_confirmation = questionary.text(
                f"Please type {service.id} to confirm uninstallation (or type cancel):")
            value = type_confirmation.ask()
            if value == "cancel":
                questionary.print("Canceled", style="fg:ansired bold")
            elif value != service.id:
                questionary.print("Invalid input, please try again", style="fg:ansired bold")
            elif value == service.id:
                service.uninstall()

    choices.any_key.ask()


def run_interactive_cmd_ui():
    while True:
        clear_terminal()
        update_choices()
        choice = choices.start.ask()

        if choice == "Start service":
            service = choices.start_service.ask()
            handle_services("start", service)
        elif choice == "Stop service":
            service = choices.stop_service.ask()
            handle_services("stop", service)
        elif choice == "Install/update service":
            service = choices.install_service.ask()
            handle_services("install", service)
        elif choice == "Uninstall service":
            service = choices.uninstall_service.ask()
            handle_services("uninstall", service)
        elif choice == "exit":
            print("Exiting...")
            exit(0)

84 utils.sh Normal file
@@ -0,0 +1,84 @@
#!/bin/bash

# Function to check if PyTorch is installed
check_pytorch_installed() {
    python -c "import torch" >/dev/null 2>&1
    return $?
}

# Function to create virtual environment
create_venv() {
    if [ -d "venv" ]; then
        echo "Virtual environment already exists. Skipping creation."
        source venv/bin/activate
    else
        echo "Creating virtual environment..."
        python3.10 -m venv --system-site-packages venv
        source venv/bin/activate
        python3.10 -m pip install --upgrade pip
    fi
}

use_venv() {
    echo "Connecting to virtual environment..."
    source venv/bin/activate
}

# Function to install build-essential or equivalent
install_build_essentials() {
    echo "Checking for build essentials..."
    if [ -f /etc/debian_version ]; then
        sudo apt-get update
        sudo apt-get install -y build-essential python3.10-dev
    elif [ -f /etc/fedora-release ]; then
        if rpm -q gcc &>/dev/null && rpm -q python3.10-devel &>/dev/null; then
            echo "Development Tools and Python 3.10 development files are already installed."
        else
            echo "Installing Development Tools and Python 3.10 development files..."
            sudo dnf groupinstall -y "Development Tools"
            sudo dnf install -y python3.10-devel
        fi
    else
        echo "Unsupported operating system. Please install build-essential or equivalent manually."
        exit 1
    fi
}

# Function to install PyTorch in the virtual environment
install_pytorch() {
    # Check if PyTorch is installed
    if check_pytorch_installed; then
        echo "PyTorch is already installed."
    else
        echo "Installing PyTorch..."
        python3.10 -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.1
    fi
}

prepare_env() {
    # Create virtual environment
    create_venv

    # Install build essentials
    install_build_essentials

    # Install PyTorch in the virtual environment
    install_pytorch
}

clean() {
    python3.10 -m pip cache purge
}

yes_or_no() {
    while true; do
        read -p "$* [y/n]: " yn
        case $yn in
            [Yy]*) return 0 ;;
            [Nn]*) echo "Aborted" ; return 1 ;;
        esac
    done
}

2 xtts-rocm/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
venv/
webui/

48 xtts-rocm/install.sh Executable file
@@ -0,0 +1,48 @@
#!/bin/bash
source ../utils.sh

python_exec=venv/bin/python3.10

# Function to install/update XTTS
install_xtts() {
    if [ -d "webui" ]; then
        echo "XTTS repository already exists. Skipping clone."
        yes_or_no "Do you want to update XTTS WebUI?" && {
            cd webui
            rm requirements_without_torch.txt
            git pull
            echo "XTTS WebUI successfully updated."
            cd ..
        }
    else
        echo "Cloning XTTS repository..."
        git clone https://github.com/daswer123/xtts-webui webui
    fi

    iconv -f UTF-16 -t UTF-8 webui/requirements.txt | grep -v 'torch' > webui/requirements_without_torch.txt
    $python_exec -m pip install -r webui/requirements_without_torch.txt

    # Disable gpu for faster-whisper as ROCM isn't supported yet
    sed -i 's/device = "cuda" if torch.cuda.is_available() else "cpu"/device = "cpu"/' webui/scripts/utils/formatter.py
    sed -i 's/asr_model = WhisperModel(whisper_model, device=device, compute_type="float16")/asr_model = WhisperModel(whisper_model, device=device, compute_type="int8")/' webui/scripts/utils/formatter.py

    # Deepspeed and ninja (not working)
    $python_exec -m pip install ninja deepspeed
    # apt-get install -y ninja-build

    ln -s webui/models models
}

# Main function
main() {
    prepare_env

    # Install XTTS
    install_xtts

    clean
    echo "XTTS installation/update complete."
}

# Run main function
main

18 xtts-rocm/run.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
source ../utils.sh

python_exec=venv/bin/python3.10

# Main function
main() {
    # Create virtual environment
    use_venv

    # Prints ROCM info with available GPUs
    rocm-smi

    # Start XTTS
    cd webui/
    TORCH_BLAS_PREFER_HIPBLASLT=0 ../$python_exec app.py --host 0.0.0.0 -v v2.0.3
}

main