Upload nvidia.Minitron-4B-Base.Q4_K_S.gguf with huggingface_hub
- .gitattributes +1 -0
- nvidia.Minitron-4B-Base.Q4_K_S.gguf +3 -0
.gitattributes
CHANGED
@@ -42,3 +42,4 @@ nvidia.Minitron-4B-Base.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
 nvidia.Minitron-4B-Base.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+nvidia.Minitron-4B-Base.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
nvidia.Minitron-4B-Base.Q4_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1f5a02150348a55b61bfe2f2c7490a2ac77cdc280986a8ec2f3b396937e1fcf
+size 2583353184
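
For reference, a commit like this is typically produced with a call along the lines of the Python sketch below. The repo id and local path are assumptions for illustration, not values taken from this commit; only the file name and commit message match the diff above.

    # Minimal sketch of an upload with huggingface_hub (assumed repo id and local path).
    from huggingface_hub import HfApi

    api = HfApi()

    # upload_file sends the local GGUF to the Hub. Files matched by the repo's
    # .gitattributes LFS rules are stored as LFS objects, so only a pointer
    # (version / oid / size), as shown above, lands in the Git history.
    api.upload_file(
        path_or_fileobj="nvidia.Minitron-4B-Base.Q4_K_S.gguf",  # local file (assumed path)
        path_in_repo="nvidia.Minitron-4B-Base.Q4_K_S.gguf",     # destination in the repo
        repo_id="user/nvidia.Minitron-4B-Base-GGUF",            # hypothetical repo id
        commit_message="Upload nvidia.Minitron-4B-Base.Q4_K_S.gguf with huggingface_hub",
    )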