Update README.md
Browse files
README.md
CHANGED
@@ -67,10 +67,7 @@ To use this model, follow the example code provided below. Ensure you have the n
|
|
67 |
### Installation
|
68 |
|
69 |
```bash
|
70 |
-
pip install transformers
|
71 |
-
pip install bitsandbytes
|
72 |
-
pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
|
73 |
-
pip install llmtuner
|
74 |
```
|
75 |
|
76 |
### Run the model
|
@@ -81,7 +78,7 @@ from llmtuner.extras.misc import torch_gc
|
|
81 |
|
82 |
chat_model = ChatModel(dict(
|
83 |
model_name_or_path="unsloth/llama-3-8b-Instruct-bnb-4bit", # use bnb-4bit-quantized Llama-3-8B-Instruct model
|
84 |
-
adapter_name_or_path="...", # load the saved LoRA adapters
|
85 |
finetuning_type="lora", # same as the one used in training
|
86 |
template="llama3", # same as the one used in training
|
87 |
quantization_bit=4, # load 4-bit quantized model
|
|
|
67 |
### Installation
|
68 |
|
69 |
```bash
|
70 |
+
pip install transformers bitsandbytes "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" llmtuner
|
|
|
|
|
|
|
71 |
```
|
72 |
|
73 |
### Run the model
|
|
|
78 |
|
79 |
chat_model = ChatModel(dict(
|
80 |
model_name_or_path="unsloth/llama-3-8b-Instruct-bnb-4bit", # use bnb-4bit-quantized Llama-3-8B-Instruct model
|
81 |
+
adapter_name_or_path="./", # load the saved LoRA adapters
|
82 |
finetuning_type="lora", # same as the one used in training
|
83 |
template="llama3", # same as the one used in training
|
84 |
quantization_bit=4, # load 4-bit quantized model
|