Update README.md
Browse files
README.md
CHANGED
@@ -36,7 +36,7 @@ load_in_4bit = False # Use 4bit quantization to reduce memory usage. Can be True

 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "…",  <!-- original value truncated in extraction; only the opening quote survived -->
+    model_name = "AdrienB134/French-Alpaca-Mistral-7B-v0.3",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,