Push to git
This commit is contained in:
commit
738673ee67
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
stablediffusion/models/
|
||||
stablediffusion/outputs/
|
||||
stablediffusion/extensions/
|
||||
stablediffusion/embeddings/
|
||||
llamacpp/extra/
|
||||
llamacpp/models/
|
||||
koboldai/models/
|
||||
koboldai/presets/
|
69
Dockerfile
Normal file
69
Dockerfile
Normal file
@ -0,0 +1,69 @@
|
||||
# syntax=docker/dockerfile:1
FROM rocm/dev-ubuntu-22.04:5.7-complete

# Only runtime-relevant Python settings live in ENV; DEBIAN_FRONTEND is
# applied per-RUN below so it does not leak into the container environment.
ENV PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8

WORKDIR /tmp

# Install python 3.10 and build requirements.
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa
# NOTE: curl added explicitly — the original list installed only wget, so the
# `curl | python3.10` bootstrap below depended on curl happening to exist in
# the base image. Package list sorted; lists removed in the same layer.
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y \
        curl \
        git \
        python-is-python3 \
        python3-dev \
        python3.10 \
        python3.10-venv \
        rsync \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Fail the build if curl fails; default /bin/sh would mask errors on the
# left side of the pipe.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
RUN python3.10 -m pip install --no-cache-dir --upgrade pip wheel setuptools

# Install PyTorch (ROCm 5.7 nightly wheels)
RUN python3.10 -m pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7

# Create ai folder for saving projects
RUN mkdir /ai/

# Install StableDiffusion in /ai/
WORKDIR /ai/stablediffusion-webui
RUN git clone -b dev https://github.com/AUTOMATIC1111/stable-diffusion-webui /ai/git/stablediffusion-webui && \
    cp -R /ai/git/stablediffusion-webui/* /ai/stablediffusion-webui/
# Create VENV for StableDiffusion that inherits the system-wide pytorch
RUN python3.10 -m venv /ai/venv/stablediffusion/ --system-site-packages
# launch.py --exit installs webui deps into the venv and quits
RUN /ai/venv/stablediffusion/bin/python launch.py --skip-torch-cuda-test --exit
RUN /ai/venv/stablediffusion/bin/python -m pip install --no-cache-dir --upgrade opencv-python-headless tomesd protobuf
# RUN /ai/venv/stablediffusion/bin/python -m pip install -r requirements.txt # should not be needed

# Install Kobold AI in /ai/
WORKDIR /ai/koboldai
RUN git clone https://github.com/YellowRoseCx/koboldcpp-rocm.git -b main --depth 1 /ai/git/koboldai && \
    cp -R /ai/git/koboldai/* /ai/koboldai/
# No venv needed: koboldcpp runs against the system python
# Build KoboldAI for ROCm
RUN make LLAMA_HIPBLAS=1 -j4

# Install LlamaCPP in /ai/
WORKDIR /ai/llamacpp
RUN git clone https://github.com/ggerganov/llama.cpp.git -b master /ai/git/llamacpp && \
    cp -R /ai/git/llamacpp/* /ai/llamacpp/
# Install requirements for the LlamaCPP build.
# apt-get update MUST run in the same layer: the package lists were removed
# after the earlier install step, so a bare install here would fail.
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y \
        hip-runtime-amd \
        hipblas \
        hipblaslt \
        hipcub \
        hipsparse \
        rocrand \
        rocthrust \
        rocthrust-dev \
    && rm -rf /var/lib/apt/lists/*
# Build LlamaCPP for ROCm
RUN make LLAMA_HIPBLAS=1 -j4

# Mark every directory as git-safe so extensions in bind mounts (different
# owner uid) can still run git operations
RUN git config --global --add safe.directory "*"

# Go back to ai folder when done
WORKDIR /ai

# PyTorch allocator tuning for AMD GPUs
ENV PYTORCH_HIP_ALLOC_CONF=garbage_collection_threshold:0.8,max_split_size_mb:128
# ENV HIP_VISIBLE_DEVICES="0"
# ENV AMDGPU_TARGETS="gfx1030"
# ENV HSA_OVERRIDE_GFX_VERSION='10.3.0'
|
95
docker-compose.yml
Normal file
95
docker-compose.yml
Normal file
@ -0,0 +1,95 @@
|
||||
version: '3'

services:
  stablediff-rocm:
    image: ai-suite-rocm:5.7
    container_name: stablediffusion-rocm
    environment:
      TZ: "Europe/Zurich"
      ROC_ENABLE_PRE_VEGA: 1
      COMMANDLINE_ARGS: "--listen --enable-insecure-extension-access --opt-split-attention"
      #HSA_OVERRIDE_GFX_VERSION: 10.3.0
      #PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128
    entrypoint: ["/bin/sh", "-c"]
    working_dir: /ai/stablediffusion-webui/
    # $$ is compose escaping for a literal $, so COMMANDLINE_ARGS is expanded
    # by the container shell at run time, not by docker-compose.
    # FIX: launch.py was previously started WITHOUT $$COMMANDLINE_ARGS (the
    # args were only echoed), so --listen etc. never took effect and the UI
    # was unreachable from outside the container.
    command: >
      "rocm-smi; echo launch.py $$COMMANDLINE_ARGS;
      if [ ! -f ./models/Stable-diffusion/*.ckpt ]; then
        echo 'Please copy stable diffusion model to stablediff-models directory'
        echo 'You may need sudo to perform this action'
        exit 1
      fi;
      chmod -R 777 /ai/stablediffusion-webui/outputs;
      /ai/venv/stablediffusion/bin/python launch.py $$COMMANDLINE_ARGS"
    ports:
      - "5000:7860"
    devices:
      - "/dev/kfd:/dev/kfd"
      - "/dev/dri:/dev/dri"
    group_add:
      - video
    ipc: host
    cap_add:
      - SYS_PTRACE
    security_opt:
      - seccomp:unconfined
    volumes:
      # - ./stablediffusion/webui:/ai/stablediffusion-webui
      - ./stablediffusion/models:/ai/stablediffusion-webui/models/
      - ./stablediffusion/embeddings:/ai/stablediffusion-webui/embeddings/
      - ./stablediffusion/extensions:/ai/stablediffusion-webui/extensions/
      - ./stablediffusion/outputs:/ai/stablediffusion-webui/outputs/

  kobold-rocm:
    image: ai-suite-rocm:5.7
    container_name: koboldai-rocm
    environment:
      TZ: "Europe/Zurich"
      ROC_ENABLE_PRE_VEGA: 1
      COMMANDLINE_ARGS: ""
      # HSA_OVERRIDE_GFX_VERSION: 10.3.0
      # PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128
    entrypoint: ["/bin/sh", "-c"]
    working_dir: /ai/koboldai/
    command: ["./koboldcpp.py --config config.kcpps"]
    ports:
      - "5001:5001"
    devices:
      - "/dev/kfd:/dev/kfd"
      - "/dev/dri:/dev/dri"
    group_add:
      - video
    ipc: host
    cap_add:
      - SYS_PTRACE
    security_opt:
      - seccomp:unconfined
    volumes:
      - ./koboldai/config.kcpps:/ai/koboldai/config.kcpps
      - ./koboldai/models:/ai/koboldai/localmodels

  llamacpp-rocm:
    image: ai-suite-rocm:5.7
    container_name: llamacpp-rocm
    environment:
      TZ: "Europe/Zurich"
      ROC_ENABLE_PRE_VEGA: 1
      COMMANDLINE_ARGS: "-m /ai/llamacpp/models/llama2-13b-tiefighter.Q6_K.gguf -c 512 -b 1024 -n 256 --keep 48 --repeat_penalty 1.0 --color -i -r \"User:\" -f prompts/chat-with-ellie.txt"
      # HSA_OVERRIDE_GFX_VERSION: 10.3.0
      # PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128
    entrypoint: ["/bin/sh", "-c"]
    working_dir: /ai/llamacpp/
    command: ["./main $$COMMANDLINE_ARGS"]
    # interactive mode (-i) needs a tty
    tty: true
    devices:
      - "/dev/kfd:/dev/kfd"
      - "/dev/dri:/dev/dri"
    group_add:
      - video
    ipc: host
    cap_add:
      - SYS_PTRACE
    security_opt:
      - seccomp:unconfined
    volumes:
      - ./llamacpp/models:/ai/llamacpp/models
      - ./llamacpp/extra:/ai/llamacpp/extra
|
1
koboldai/config.kcpps
Normal file
1
koboldai/config.kcpps
Normal file
@ -0,0 +1 @@
|
||||
{"model": null, "model_param": "/ai/koboldai/localmodels/ggml-vicuna-13B-1.1-q5_1.bin", "port": 5001, "port_param": 5001, "host": "", "launch": true, "lora": null, "config": null, "threads": 12, "blasthreads": null, "psutil_set_threads": false, "highpriority": false, "contextsize": 2048, "blasbatchsize": 512, "ropeconfig": [0.0, 10000.0], "stream": true, "smartcontext": false, "unbantokens": false, "bantokens": null, "usemirostat": null, "forceversion": 0, "nommap": false, "usemlock": false, "noavx2": false, "debugmode": false, "skiplauncher": false, "hordeconfig": null, "noblas": false, "useclblast": null, "usecublas": ["normal", "0", "mmq"], "gpulayers": 64, "tensor_split": null}
|
1
koboldai/models
Symbolic link
1
koboldai/models
Symbolic link
@ -0,0 +1 @@
|
||||
/mnt/DATA/LLM_MODELS/
|
1
llamacpp/models
Symbolic link
1
llamacpp/models
Symbolic link
@ -0,0 +1 @@
|
||||
/mnt/DATA/LLM_MODELS/
|
4
make_folders.sh
Executable file
4
make_folders.sh
Executable file
@ -0,0 +1,4 @@
|
||||
#!/bin/sh
# Create the host-side data directories used by docker-compose.yml and
# symlink the shared model stores into them.
set -e

mkdir -p stablediffusion koboldai llamacpp

# -f/-n: replace an existing link (without descending into it) so the
# script is idempotent — the original plain `ln -s` failed on a second run.
ln -sfn '/mnt/DATA/SD_MODELS/' ./stablediffusion/models
ln -sfn '/mnt/DATA/LLM_MODELS/' ./koboldai/models
ln -sfn '/mnt/DATA/LLM_MODELS/' ./llamacpp/models
|
1
stablediffusion/models
Symbolic link
1
stablediffusion/models
Symbolic link
@ -0,0 +1 @@
|
||||
/mnt/DATA/SD_MODELS/
|
1
start_koboldai.sh
Executable file
1
start_koboldai.sh
Executable file
@ -0,0 +1 @@
|
||||
# Start (detached) the "kobold-rocm" service defined in docker-compose.yml.
/usr/bin/docker-compose up -d kobold-rocm
|
1
start_llamacpp.sh
Executable file
1
start_llamacpp.sh
Executable file
@ -0,0 +1 @@
|
||||
# Start (detached) the llama.cpp service defined in docker-compose.yml.
# FIX: the compose file declares the service as "llamacpp-rocm"; the previous
# name "llama-rocm" does not exist, so this script always failed with
# "no such service".
/usr/bin/docker-compose up -d llamacpp-rocm
|
1
start_stablediffusion.sh
Executable file
1
start_stablediffusion.sh
Executable file
@ -0,0 +1 @@
|
||||
# Start (detached) the "stablediff-rocm" service defined in docker-compose.yml.
/usr/bin/docker-compose up -d stablediff-rocm
|
Loading…
Reference in New Issue
Block a user