Upload nvidia.Minitron-4B-Base.Q3_K_L.gguf with huggingface_hub
- .gitattributes +1 -0
- nvidia.Minitron-4B-Base.Q3_K_L.gguf +3 -0
.gitattributes CHANGED
@@ -41,3 +41,4 @@ nvidia.Minitron-4B-Base.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nvidia.Minitron-4B-Base.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
nvidia.Minitron-4B-Base.Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abceb305ef7f1a2d04d46dca0accffef97a8c93d0cc247c31e4b7dd3dc535a19
+size 2452952928
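
For context, the new .gguf entry above is stored in the repository as a Git LFS pointer (version, oid, size) rather than as the binary itself. A commit with this message is typically produced by a call along the lines of the sketch below; the repo_id is a placeholder, since the target repository is not named in the diff, and the checksum step is only an optional sanity check against the pointer's oid.

    # Minimal sketch, assuming the .gguf file is in the current directory
    # and that <user>/Minitron-4B-Base-GGUF stands in for the real repo id.
    import hashlib

    from huggingface_hub import HfApi

    GGUF_FILE = "nvidia.Minitron-4B-Base.Q3_K_L.gguf"

    # Optional: the LFS pointer records the file's SHA-256 and size, so they
    # can be compared against the local file before uploading.
    sha256 = hashlib.sha256()
    with open(GGUF_FILE, "rb") as f:
        for chunk in iter(lambda: f.read(8 * 1024 * 1024), b""):
            sha256.update(chunk)
    print("oid sha256:", sha256.hexdigest())

    api = HfApi()
    api.upload_file(
        path_or_fileobj=GGUF_FILE,
        path_in_repo=GGUF_FILE,
        repo_id="<user>/Minitron-4B-Base-GGUF",  # placeholder repo id
        repo_type="model",
        commit_message="Upload nvidia.Minitron-4B-Base.Q3_K_L.gguf with huggingface_hub",
    )

upload_file streams the binary to the Hub's LFS storage and commits only the small pointer shown above, which is why the .gitattributes hunk marks the new quantization with filter=lfs.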