sharpenb committed on
Commit
30122c0
·
verified ·
1 Parent(s): 76261fa

Upload folder using huggingface_hub (#4)

Browse files

- b4418ee54388b4e25ac15b55d464fa8c8ce4b9e59435fd031f19d5947b17c189 (ee115dea7747e4e299609967dbe701691f870643)
- 66b917f0ec62dc7d4eab84531624cdcafa0bdc3e3aa61d302044e16adb6f4d34 (2e7c005b4d2316fbd6fedfe6d422a20ae8f94dc9)
- 123388211520c9f874656e8af77b2a77a965f85075fff600c4b5256a1a7ab868 (e478d07199e74350aa7636c8b99100c5a4e49cc1)

Files changed (2) hide show
  1. config.json +1 -1
  2. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "/covalent/.cache/models/tmp8h2_zh9iyslfgde3",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
 
1
  {
2
+ "_name_or_path": "/covalent/.cache/models/tmp4mfb674hsh84xn2x",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
smash_config.json CHANGED
@@ -28,7 +28,7 @@
28
  "quant_llm-int8_weight_bits": 8,
29
  "max_batch_size": 1,
30
  "device": "cuda",
31
- "cache_dir": "/covalent/.cache/models/tmp8h2_zh9i",
32
  "task": "",
33
  "save_load_fn": "bitsandbytes",
34
  "save_load_fn_args": {}
 
28
  "quant_llm-int8_weight_bits": 8,
29
  "max_batch_size": 1,
30
  "device": "cuda",
31
+ "cache_dir": "/covalent/.cache/models/tmp4mfb674h",
32
  "task": "",
33
  "save_load_fn": "bitsandbytes",
34
  "save_load_fn_args": {}