matthoffner committed on
Commit
dcad1d1
·
verified ·
1 Parent(s): 3c1ea57

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +3 -3
Dockerfile CHANGED
@@ -26,8 +26,8 @@ ENV PATH="/usr/local/cuda/bin:$PATH" \
26
  WORKDIR /app
27
 
28
  # Download ggml and mmproj models from HuggingFace
29
- RUN wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q5_K_M.gguf && \
30
- wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf
31
 
32
  # Clone and build llava-server with CUDA support
33
  RUN ls -al
@@ -37,4 +37,4 @@ RUN make LLAMA_CUBLAS=1
37
  EXPOSE 8080
38
 
39
  # Start the llava-server with models
40
- CMD ["--server", "--model", "llava-v1.6-mistral-7b.Q5_K_M.gguf", "--mmproj", "mmproj-model-f16.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "33"]
 
26
  WORKDIR /app
27
 
28
  # Download ggml and mmproj models from HuggingFace
29
+ RUN wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/ggml-yi-34b-f16-q_5_k.gguf && \
30
+ wget https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-llava-34b-f16-q6_k.gguf
31
 
32
  # Clone and build llava-server with CUDA support
33
  RUN ls -al
 
37
  EXPOSE 8080
38
 
39
  # Start the llava-server with models
40
+ CMD ["--server", "--model", "ggml-yi-34b-f16-q_5_k.gguf", "--mmproj", "mmproj-llava-34b-f16-q6_k.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "33"]