XanderJC committed
Commit 0f77787 · verified · Parent: 9e65c93

Push model using huggingface_hub.

README.md CHANGED
@@ -24,7 +24,7 @@ You can then generate text as follows:
  ```python
  from transformers import pipeline
 
- generator = pipeline("text-generation", model="XanderJC//tmp/tmpx7vqbd73/XanderJC/llama-3-8b-orca-rlhf")
+ generator = pipeline("text-generation", model="XanderJC//tmp/tmptjma5k3d/XanderJC/llama-3-8b-orca-rlhf")
  outputs = generator("Hello, my llama is cute")
  ```
 
@@ -34,8 +34,8 @@ If you want to use the model for training or to obtain the outputs from the valu
  from transformers import AutoTokenizer
  from trl import AutoModelForCausalLMWithValueHead
 
- tokenizer = AutoTokenizer.from_pretrained("XanderJC//tmp/tmpx7vqbd73/XanderJC/llama-3-8b-orca-rlhf")
- model = AutoModelForCausalLMWithValueHead.from_pretrained("XanderJC//tmp/tmpx7vqbd73/XanderJC/llama-3-8b-orca-rlhf")
+ tokenizer = AutoTokenizer.from_pretrained("XanderJC//tmp/tmptjma5k3d/XanderJC/llama-3-8b-orca-rlhf")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("XanderJC//tmp/tmptjma5k3d/XanderJC/llama-3-8b-orca-rlhf")
 
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
  outputs = model(**inputs, labels=inputs["input_ids"])
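The README's second snippet loads the model with its value head but stops at the forward call without showing what it returns. Below is a minimal sketch, assuming the TRL convention that `AutoModelForCausalLMWithValueHead` returns a `(lm_logits, loss, value)` tuple; the repo ID is a placeholder, since the path written into the README by this push points at a temporary directory.

```python
import torch
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

# Placeholder repo ID (assumption); the "/tmp/..." path committed above is a
# temporary directory and is not expected to resolve on the Hub.
model_id = "XanderJC/llama-3-8b-orca-rlhf"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLMWithValueHead.from_pretrained(model_id)

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
with torch.no_grad():
    # TRL value-head models return (lm_logits, loss, value); loss is only
    # populated because labels are passed through to the base model.
    lm_logits, loss, value = model(**inputs, labels=inputs["input_ids"])

print(lm_logits.shape)  # (batch, seq_len, vocab_size) next-token logits
print(value.shape)      # (batch, seq_len) per-token value estimates
```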
adapter_config.json CHANGED
@@ -20,8 +20,8 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "q_proj"
+ "q_proj",
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6f3f57cb244bd19e931135175a7606efc60463905ef0367815d92c57c3c96057
+ oid sha256:21fd2e5948ee21dc0f71539de1086091bdbbd1d8d0949a18c379146351dd1656
  size 54543184
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ba50ab5155cc2ee6e319e5f4455a7f1a4f5ff3fdb1cc3b44425aa10bf8189f5a
+ oid sha256:de91c2883775765d666f20488e13983479351d89fdb514685fe4529916f3760f
  size 17916
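Both `adapter_model.safetensors` and `pytorch_model.bin` are stored via Git LFS, so the diffs above only show the pointer files: the `oid` (SHA-256 of the actual blob) changes while the recorded size stays the same. A minimal sketch for checking a locally downloaded blob against such a pointer; the local file path and helper function are illustrative, not part of the repository.

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size in a Git LFS pointer."""
    data = Path(blob_path).read_bytes()
    digest = hashlib.sha256(data).hexdigest()
    return digest == expected_oid and len(data) == expected_size

# oid/size taken from the new pytorch_model.bin pointer above; the local
# path is a hypothetical download location.
ok = verify_lfs_pointer(
    "pytorch_model.bin",
    expected_oid="de91c2883775765d666f20488e13983479351d89fdb514685fe4529916f3760f",
    expected_size=17916,
)
print("pointer matches blob:", ok)
```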