apepkuss79 committed on
Commit
fa5256b
·
verified ·
1 Parent(s): a33cb80

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -12
README.md CHANGED
@@ -28,22 +28,16 @@ quantized_by: Second State Inc.
28
 
29
  - LlamaEdge version: coming soon
30
 
31
- <!-- - LlamaEdge version: [v0.12.3](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.12.3) and above
32
 
33
  - Prompt template
34
 
35
- - Prompt type: `deepseek-chat-2`
36
 
37
  - Prompt string
38
 
39
  ```text
40
- <|begin_of_sentence|>{system_message}
41
-
42
- User: {user_message_1}
43
-
44
- Assistant: {assistant_message_1}<|end_of_sentence|>User: {user_message_2}
45
-
46
- Assistant:
47
  ```
48
 
49
  - Context size: `128000`
@@ -54,7 +48,7 @@ quantized_by: Second State Inc.
54
  wasmedge --dir .:. \
55
  --nn-preload default:GGML:AUTO:DeepSeek-V2.5-Q5_K_M.gguf \
56
  llama-api-server.wasm \
57
- --prompt-template deepseek-chat-2 \
58
  --ctx-size 128000 \
59
  --model-name DeepSeek-V2.5
60
  ```
@@ -65,8 +59,8 @@ quantized_by: Second State Inc.
65
  wasmedge --dir .:. \
66
  --nn-preload default:GGML:AUTO:DeepSeek-V2.5-Q5_K_M.gguf \
67
  llama-chat.wasm \
68
- --prompt-template deepseek-chat-2 \
69
  --ctx-size 128000
70
- ``` -->
71
 
72
  *Quantized with llama.cpp b3664*
 
28
 
29
  - LlamaEdge version: coming soon
30
 
31
+ <!-- - LlamaEdge version: [v0.12.3](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.12.3) and above -->
32
 
33
  - Prompt template
34
 
35
+ - Prompt type: `deepseek-chat-25`
36
 
37
  - Prompt string
38
 
39
  ```text
40
+ <|begin_of_sentence|>{system_message}<|User|>{user_message_1}<|Assistant|>{assistant_message_1}<|end_of_sentence|><|User|>{user_message_2}<|Assistant|>
 
 
 
 
 
 
41
  ```
42
 
43
  - Context size: `128000`
 
48
  wasmedge --dir .:. \
49
  --nn-preload default:GGML:AUTO:DeepSeek-V2.5-Q5_K_M.gguf \
50
  llama-api-server.wasm \
51
+ --prompt-template deepseek-chat-25 \
52
  --ctx-size 128000 \
53
  --model-name DeepSeek-V2.5
54
  ```
 
59
  wasmedge --dir .:. \
60
  --nn-preload default:GGML:AUTO:DeepSeek-V2.5-Q5_K_M.gguf \
61
  llama-chat.wasm \
62
+ --prompt-template deepseek-chat-25 \
63
  --ctx-size 128000
64
+ ```
65
 
66
  *Quantized with llama.cpp b3664*