matthoffner committed (verified)
Commit 2a412a5 · 1 Parent(s): 14f0eb7

Update Dockerfile

Files changed (1): Dockerfile (+3 -3)
Dockerfile CHANGED
@@ -26,8 +26,8 @@ ENV PATH="/usr/local/cuda/bin:$PATH" \
 WORKDIR /app
 
 # Download ggml and mmproj models from HuggingFace
-RUN wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/ggml-yi-34b-f16-q_5_k.gguf && \
-wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-llava-34b-f16-q6_k.gguf
+RUN wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mistral-7b-q_5_k.gguf && \
+wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-mistral7b-f16.gguf
 
 # Clone and build llava-server with CUDA support
 RUN ls -al
@@ -37,4 +37,4 @@ RUN make LLAMA_CUBLAS=1
 EXPOSE 8080
 
 # Start the llava-server with models
-CMD ["--server", "--model", "ggml-yi-34b-f16-q_5_k.gguf", "--mmproj", "mmproj-llava-34b-f16-q6_k.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "61"]
+CMD ["--server", "--model", "mistral-7b-q_5_k.gguf", "--mmproj", "mmproj-mistral7b-f16.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "31"]
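The commit swaps the 34B Yi-based LLaVA weights for the much smaller Mistral-7B variant, and lowers -ngl from 61 to 31 so fewer layers are offloaded to the GPU, presumably sized to the 7B model's layer count. A minimal build-and-run sketch for local testing follows; the image tag is made up here, and it assumes the NVIDIA container runtime is installed and that the Dockerfile's ENTRYPOINT (not shown in this diff) invokes the server binary that consumes these CMD arguments:

# Hypothetical local smoke test; "llava-server" is an arbitrary tag,
# and --gpus all requires the NVIDIA container runtime on the host.
docker build -t llava-server .
docker run --gpus all -p 8080:8080 llava-server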
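Since the CMD binds the server to 0.0.0.0 and the image EXPOSEs 8080, a request sketch, assuming the container wraps llama.cpp's example server and its POST /completion endpoint (the endpoint and JSON fields are an assumption, not confirmed by this diff):

# Hypothetical request; /completion and its fields come from llama.cpp's
# example server and may differ in this particular build.
curl -s http://localhost:8080/completion \
  -H 'Content-Type: application/json' \
  -d '{"prompt": "Hello", "n_predict": 64}'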