Upload nvidia.Minitron-4B-Base.Q3_K_S.gguf with huggingface_hub
- .gitattributes +1 -0
- nvidia.Minitron-4B-Base.Q3_K_S.gguf +3 -0
.gitattributes
CHANGED
@@ -40,3 +40,4 @@ nvidia.Minitron-4B-Base.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+nvidia.Minitron-4B-Base.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
nvidia.Minitron-4B-Base.Q3_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3eb43bf675f7bcfeb467afe5bad3cfb75fd6e90cbc5425a0497ca8212f00dfc3
+size 2115573600
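
The commit title indicates the file was pushed with the huggingface_hub client. Below is a minimal sketch of how such an upload might look; the repo_id is a placeholder for illustration and is not taken from this commit.

from huggingface_hub import HfApi

# Minimal sketch of uploading a single GGUF file to a model repo.
# repo_id below is a placeholder (hypothetical), not part of this commit.
api = HfApi()
api.upload_file(
    path_or_fileobj="nvidia.Minitron-4B-Base.Q3_K_S.gguf",
    path_in_repo="nvidia.Minitron-4B-Base.Q3_K_S.gguf",
    repo_id="your-username/Minitron-4B-Base-GGUF",  # placeholder repo id
    repo_type="model",
)

Files this large are stored via Git LFS, so the repository itself only tracks a small pointer file containing the object's sha256 oid and byte size, which is exactly what the new-file diff above shows.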