sharpenb committed (verified)
Commit 76261fa · Parent: b661f57

Upload folder using huggingface_hub (#3)


- b4418ee54388b4e25ac15b55d464fa8c8ce4b9e59435fd031f19d5947b17c189 (08a4eb5832280626d6f76671820b63c26e8f3b88)
- 66b917f0ec62dc7d4eab84531624cdcafa0bdc3e3aa61d302044e16adb6f4d34 (c8fe2dba7400817ad00ead138bd933b497d0c76a)
- c3a10bc050fcd07d9bb8bfb2bcb8580b7b14b54ae654e5737298451c9e4eb1a0 (367ad35b5687406798a67da696dd38de84362df4)

Files changed (2)
  1. config.json +1 -1
  2. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpt1q38tpii5bn66us",
+  "_name_or_path": "/covalent/.cache/models/tmp8h2_zh9iyslfgde3",
   "architectures": [
     "LlamaForCausalLM"
   ],
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpt1q38tpi",
+  "cache_dir": "/covalent/.cache/models/tmp8h2_zh9i",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}