commit 738673ee676072882168b1f33bb31fce4153874b Author: Mathieu Broillet Date: Fri Nov 10 21:10:06 2023 +0100 Push to git diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9ee457e --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +stablediffusion/models/ +stablediffusion/outputs/ +stablediffusion/extensions/ +stablediffusion/embeddings/ +llamacpp/extra/ +llamacpp/models/ +koboldai/models/ +koboldai/presets/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..ff7f1d0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,70 @@ +FROM rocm/dev-ubuntu-22.04:5.7-complete +ENV DEBIAN_FRONTEND=noninteractive \ + PYTHONUNBUFFERED=1 \ + PYTHONIOENCODING=UTF-8 +WORKDIR /tmp + +# Install python 3.10 and requirements +RUN apt-get update -y && apt-get install -y software-properties-common && \ + add-apt-repository ppa:deadsnakes/ppa +RUN apt-get update &&\ + apt-get install -y \ + curl \ + wget \ + git \ + python3.10 \ + python3-dev \ + python-is-python3 \ + python3.10-venv \ + rsync +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 +RUN python3.10 -m pip install --upgrade pip wheel setuptools + +# Install PyTorch +RUN python3.10 -m pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7 + +# Create ai folder for saving projects +RUN mkdir /ai/ + +# Install StableDiffusion in /ai/ +WORKDIR /ai/stablediffusion-webui +RUN git clone -b dev https://github.com/AUTOMATIC1111/stable-diffusion-webui /ai/git/stablediffusion-webui +RUN cp -R /ai/git/stablediffusion-webui/* /ai/stablediffusion-webui/ +# Create VENV for StableDiffusion with inherit pytorch +RUN python3.10 -m venv /ai/venv/stablediffusion/ --system-site-packages +RUN /ai/venv/stablediffusion/bin/python launch.py --skip-torch-cuda-test --exit +RUN /ai/venv/stablediffusion/bin/python -m pip install opencv-python-headless tomesd protobuf --upgrade +# RUN /ai/venv/stablediffusion/bin/python -m pip install -r requirements.txt # should not be needed + +# Install
Kobold AI in /ai/ +WORKDIR /ai/koboldai +RUN git clone https://github.com/YellowRoseCx/koboldcpp-rocm.git -b main --depth 1 /ai/git/koboldai +RUN cp -R /ai/git/koboldai/* /ai/koboldai/ +# Create VENV for KoboldAI with inherit pytorch +# RUN python3.10 -m venv /ai/venv/koboldai/ --system-site-packages # not needed actually +# Install python requirements +# RUN /ai/venv/koboldai/bin/python -m pip install -r requirements.txt # should not be needed +# Build KoboldAI for ROCM +RUN make LLAMA_HIPBLAS=1 -j4 + +# Install LlamaCPP in /ai/ +WORKDIR /ai/llamacpp +RUN git clone https://github.com/ggerganov/llama.cpp.git -b master /ai/git/llamacpp +RUN cp -R /ai/git/llamacpp/* /ai/llamacpp/ +# Install requirements for LlamaCPP build +RUN apt-get install -y hipblas hipblaslt hipsparse hipcub hip-runtime-amd rocthrust rocthrust-dev rocrand +# Build LlamaCPP for ROCM +RUN make LLAMA_HIPBLAS=1 -j4 + + +# Set safe directory for extensions and stuff +RUN git config --global --add safe.directory "*" + +# Go back to ai folder when done +WORKDIR /ai + +# Setup PyTorch ENVs for AMD GPUs +ENV PYTORCH_HIP_ALLOC_CONF=garbage_collection_threshold:0.8,max_split_size_mb:128 +# ENV HIP_VISIBLE_DEVICES="0" +# ENV AMDGPU_TARGETS="gfx1030" +# ENV HSA_OVERRIDE_GFX_VERSION='10.3.0' diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..fe11e9c --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,95 @@ +version: '3' + +services: + stablediff-rocm: + image: ai-suite-rocm:5.7 + container_name: stablediffusion-rocm + environment: + TZ: "Europe/Zurich" + ROC_ENABLE_PRE_VEGA: 1 + COMMANDLINE_ARGS: "--listen --enable-insecure-extension-access --opt-split-attention" + #HSA_OVERRIDE_GFX_VERSION: 10.3.0 + #PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128 + entrypoint: ["/bin/sh", "-c"] + working_dir: /ai/stablediffusion-webui/ + command: > + "rocm-smi; echo launch.py $$COMMANDLINE_ARGS; + if [ ! 
-f ./models/Stable-diffusion/*.ckpt ]; then + echo 'Please copy stable diffusion model to stablediff-models directory' + echo 'You may need sudo to perform this action' + exit 1 + fi; + chmod -R 777 /ai/stablediffusion-webui/outputs; + /ai/venv/stablediffusion/bin/python launch.py" + ports: + - "5000:7860" + devices: + - "/dev/kfd:/dev/kfd" + - "/dev/dri:/dev/dri" + group_add: + - video + ipc: host + cap_add: + - SYS_PTRACE + security_opt: + - seccomp:unconfined + volumes: + # - ./stablediffusion/webui:/ai/stablediffusion-webui + - ./stablediffusion/models:/ai/stablediffusion-webui/models/ + - ./stablediffusion/embeddings:/ai/stablediffusion-webui/embeddings/ + - ./stablediffusion/extensions:/ai/stablediffusion-webui/extensions/ + - ./stablediffusion/outputs:/ai/stablediffusion-webui/outputs/ + + kobold-rocm: + image: ai-suite-rocm:5.7 + container_name: koboldai-rocm + environment: + TZ: "Europe/Zurich" + ROC_ENABLE_PRE_VEGA: 1 + COMMANDLINE_ARGS: "" + # HSA_OVERRIDE_GFX_VERSION: 10.3.0 + # PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128 + entrypoint: ["/bin/sh", "-c"] + working_dir: /ai/koboldai/ + command: ["./koboldcpp.py --config config.kcpps"] + ports: + - "5001:5001" + devices: + - "/dev/kfd:/dev/kfd" + - "/dev/dri:/dev/dri" + group_add: + - video + ipc: host + cap_add: + - SYS_PTRACE + security_opt: + - seccomp:unconfined + volumes: + - ./koboldai/config.kcpps:/ai/koboldai/config.kcpps + - ./koboldai/models:/ai/koboldai/localmodels + llamacpp-rocm: + image: ai-suite-rocm:5.7 + container_name: llamacpp-rocm + environment: + TZ: "Europe/Zurich" + ROC_ENABLE_PRE_VEGA: 1 + COMMANDLINE_ARGS: "-m /ai/llamacpp/models/llama2-13b-tiefighter.Q6_K.gguf -c 512 -b 1024 -n 256 --keep 48 --repeat_penalty 1.0 --color -i -r \"User:\" -f prompts/chat-with-ellie.txt" + # HSA_OVERRIDE_GFX_VERSION: 10.3.0 + # PYTORCH_HIP_ALLOC_CONF: garbage_collection_threshold:0.8,max_split_size_mb:128 + entrypoint: ["/bin/sh", "-c"] + working_dir: /ai/llamacpp/ + 
command: ["./main $$COMMANDLINE_ARGS"] + tty: true + devices: + - "/dev/kfd:/dev/kfd" + - "/dev/dri:/dev/dri" + group_add: + - video + ipc: host + cap_add: + - SYS_PTRACE + security_opt: + - seccomp:unconfined + volumes: + - ./llamacpp/models:/ai/llamacpp/models + - ./llamacpp/extra:/ai/llamacpp/extra diff --git a/koboldai/config.kcpps b/koboldai/config.kcpps new file mode 100644 index 0000000..e9ffd66 --- /dev/null +++ b/koboldai/config.kcpps @@ -0,0 +1 @@ +{"model": null, "model_param": "/ai/koboldai/localmodels/ggml-vicuna-13B-1.1-q5_1.bin", "port": 5001, "port_param": 5001, "host": "", "launch": true, "lora": null, "config": null, "threads": 12, "blasthreads": null, "psutil_set_threads": false, "highpriority": false, "contextsize": 2048, "blasbatchsize": 512, "ropeconfig": [0.0, 10000.0], "stream": true, "smartcontext": false, "unbantokens": false, "bantokens": null, "usemirostat": null, "forceversion": 0, "nommap": false, "usemlock": false, "noavx2": false, "debugmode": false, "skiplauncher": false, "hordeconfig": null, "noblas": false, "useclblast": null, "usecublas": ["normal", "0", "mmq"], "gpulayers": 64, "tensor_split": null} diff --git a/koboldai/models b/koboldai/models new file mode 120000 index 0000000..9774a70 --- /dev/null +++ b/koboldai/models @@ -0,0 +1 @@ +/mnt/DATA/LLM_MODELS/ \ No newline at end of file diff --git a/llamacpp/models b/llamacpp/models new file mode 120000 index 0000000..9774a70 --- /dev/null +++ b/llamacpp/models @@ -0,0 +1 @@ +/mnt/DATA/LLM_MODELS/ \ No newline at end of file diff --git a/make_folders.sh b/make_folders.sh new file mode 100755 index 0000000..a90fce3 --- /dev/null +++ b/make_folders.sh @@ -0,0 +1,4 @@ +mkdir -p stablediffusion koboldai llamacpp +ln -s '/mnt/DATA/SD_MODELS/' ./stablediffusion/models +ln -s '/mnt/DATA/LLM_MODELS/' ./koboldai/models +ln -s '/mnt/DATA/LLM_MODELS/' ./llamacpp/models diff --git a/stablediffusion/models b/stablediffusion/models new file mode 120000 index 0000000..06e27cd --- /dev/null 
+++ b/stablediffusion/models @@ -0,0 +1 @@ +/mnt/DATA/SD_MODELS/ \ No newline at end of file diff --git a/start_koboldai.sh b/start_koboldai.sh new file mode 100755 index 0000000..8c1b060 --- /dev/null +++ b/start_koboldai.sh @@ -0,0 +1 @@ +/usr/bin/docker-compose up -d kobold-rocm \ No newline at end of file diff --git a/start_llamacpp.sh b/start_llamacpp.sh new file mode 100755 index 0000000..d8e5cde --- /dev/null +++ b/start_llamacpp.sh @@ -0,0 +1 @@ +/usr/bin/docker-compose up -d llamacpp-rocm \ No newline at end of file diff --git a/start_stablediffusion.sh b/start_stablediffusion.sh new file mode 100755 index 0000000..0db1a33 --- /dev/null +++ b/start_stablediffusion.sh @@ -0,0 +1 @@ +/usr/bin/docker-compose up -d stablediff-rocm \ No newline at end of file