Pranav-10 committed on
Commit 0e20864 · verified · 1 Parent(s): 2d19ade

Added ML model

Files changed (1)
  1. SA_ML.ipynb +991 -0
SA_ML.ipynb ADDED
@@ -0,0 +1,991 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "id": "0f9f666f",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification, Trainer, TrainingArguments\n",
11
+ "from datasets import load_dataset\n",
12
+ "import torch\n",
13
+ "from sklearn.model_selection import train_test_split\n",
14
+ "from sklearn.metrics import accuracy_score, precision_recall_fscore_support"
15
+ ]
16
+ },
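
The imports above are the only setup the notebook records (`train_test_split` is imported but the notebook ends up using the IMDb dataset's own train/test split). A quick environment check, as a sketch; the exact package versions used are not part of this commit:

```python
# Environment sketch (assumption: nothing beyond the imports above is pinned in this commit).
# pip install transformers datasets torch scikit-learn
import transformers, datasets, torch, sklearn
print(transformers.__version__, datasets.__version__, torch.__version__, sklearn.__version__)
```
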
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": 3,
20
+ "id": "2f35116b",
21
+ "metadata": {},
22
+ "outputs": [
23
+ {
24
+ "data": {
25
+ "application/vnd.jupyter.widget-view+json": {
26
+ "model_id": "a3bdffef37cd4d5aaa090640d5384825",
27
+ "version_major": 2,
28
+ "version_minor": 0
29
+ },
30
+ "text/plain": [
31
+ "Map: 0%| | 0/25000 [00:00<?, ? examples/s]"
32
+ ]
33
+ },
34
+ "metadata": {},
35
+ "output_type": "display_data"
36
+ }
37
+ ],
38
+ "source": [
39
+ "# Load the IMDb dataset\n",
40
+ "dataset = load_dataset(\"imdb\")\n",
41
+ "\n",
42
+ "# Tokenizer function\n",
43
+ "tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')\n",
44
+ "\n",
45
+ "def tokenize_function(examples):\n",
46
+ " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True, max_length=512)\n",
47
+ "\n",
48
+ "# Tokenize the dataset\n",
49
+ "tokenized_datasets = dataset.map(tokenize_function, batched=True)\n",
50
+ "\n",
51
+ "# Format for PyTorch\n",
52
+ "train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42).select(range(10000)) # Subset for training\n",
53
+ "test_dataset = tokenized_datasets[\"test\"].shuffle(seed=42).select(range(1000)) # Subset for testing\n",
54
+ "\n",
55
+ "train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])\n",
56
+ "test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])\n"
57
+ ]
58
+ },
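
A small sanity check of what the mapped dataset now contains can save a long failed training run; the sketch below assumes the cell above has been executed as shown:

```python
# Inspection sketch (assumes the tokenization cell above has run): each example should now
# carry fixed-length input_ids/attention_mask tensors plus the integer label.
sample = train_dataset[0]
print(sample["input_ids"].shape)       # torch.Size([512]) with max_length=512 padding
print(sample["attention_mask"].shape)  # torch.Size([512])
print(sample["label"])                 # 0 = negative, 1 = positive in the IMDb dataset
```
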
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": 4,
62
+ "id": "93d6a61b",
63
+ "metadata": {},
64
+ "outputs": [
65
+ {
66
+ "name": "stderr",
67
+ "output_type": "stream",
68
+ "text": [
69
+ "Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_layer_norm.bias', 'vocab_transform.bias', 'vocab_projector.bias', 'vocab_layer_norm.weight', 'vocab_projector.weight', 'vocab_transform.weight']\n",
70
+ "- This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
71
+ "- This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
72
+ "Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.weight', 'pre_classifier.bias', 'classifier.bias', 'pre_classifier.weight']\n",
73
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
74
+ ]
75
+ }
76
+ ],
77
+ "source": [
78
+ "model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=2)\n"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 5,
84
+ "id": "58400de8",
85
+ "metadata": {},
86
+ "outputs": [],
87
+ "source": [
88
+ "training_args = TrainingArguments(\n",
89
+ " output_dir='./results',\n",
90
+ " num_train_epochs=3,\n",
91
+ " per_device_train_batch_size=16,\n",
92
+ " per_device_eval_batch_size=64,\n",
93
+ " warmup_steps=500,\n",
94
+ " weight_decay=0.01,\n",
95
+ " logging_dir='./logs',\n",
96
+ " evaluation_strategy='steps', \n",
97
+ " save_strategy='steps', \n",
98
+ " load_best_model_at_end=True,\n",
99
+ " logging_steps=50, \n",
100
+ " save_steps=50 \n",
101
+ ")\n"
102
+ ]
103
+ },
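
On recent transformers releases the `evaluation_strategy` argument has been renamed `eval_strategy`; a hedged equivalent of the configuration above for those versions, with the values unchanged (`load_best_model_at_end` still requires the eval and save schedules to line up, which they do here):

```python
# Equivalent setup sketch for newer transformers releases (assumption: a version where
# `evaluation_strategy` is spelled `eval_strategy`); values mirror the cell above.
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir="./logs",
    eval_strategy="steps",
    save_strategy="steps",
    load_best_model_at_end=True,
    logging_steps=50,
    save_steps=50,
)
```
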
104
+ {
105
+ "cell_type": "code",
106
+ "execution_count": 6,
107
+ "id": "3389ad91",
108
+ "metadata": {},
109
+ "outputs": [],
110
+ "source": [
111
+ "def compute_metrics(pred):\n",
112
+ " labels = pred.label_ids\n",
113
+ " preds = pred.predictions.argmax(-1)\n",
114
+ " precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')\n",
115
+ " acc = accuracy_score(labels, preds)\n",
116
+ " return {\n",
117
+ " 'accuracy': acc,\n",
118
+ " 'f1': f1,\n",
119
+ " 'precision': precision,\n",
120
+ " 'recall': recall\n",
121
+ " }\n"
122
+ ]
123
+ },
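
`compute_metrics` receives an `EvalPrediction`-style object with raw logits and label ids from the `Trainer`; a toy, self-contained check of the function (illustrative values only, not from the run below):

```python
# Offline check of compute_metrics: a namedtuple mimics the EvalPrediction interface
# (`predictions` holds logits, `label_ids` holds the gold labels).
import numpy as np
from collections import namedtuple

FakePred = namedtuple("FakePred", ["predictions", "label_ids"])
logits = np.array([[2.0, -1.0], [0.1, 0.3], [-0.5, 1.5], [1.0, 0.2]])
labels = np.array([0, 1, 1, 0])
print(compute_metrics(FakePred(predictions=logits, label_ids=labels)))
# expected on this toy input: accuracy, f1, precision and recall all equal to 1.0
```
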
124
+ {
125
+ "cell_type": "code",
126
+ "execution_count": 7,
127
+ "id": "0d68d5ea",
128
+ "metadata": {},
129
+ "outputs": [
130
+ {
131
+ "name": "stderr",
132
+ "output_type": "stream",
133
+ "text": [
134
+ "The following columns in the training set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
135
+ "C:\\Users\\saime\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\transformers\\optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
136
+ " warnings.warn(\n",
137
+ "***** Running training *****\n",
138
+ " Num examples = 10000\n",
139
+ " Num Epochs = 3\n",
140
+ " Instantaneous batch size per device = 16\n",
141
+ " Total train batch size (w. parallel, distributed & accumulation) = 16\n",
142
+ " Gradient Accumulation steps = 1\n",
143
+ " Total optimization steps = 1875\n",
144
+ " Number of trainable parameters = 66955010\n"
145
+ ]
146
+ },
147
+ {
148
+ "data": {
149
+ "text/html": [
150
+ "\n",
151
+ " <div>\n",
152
+ " \n",
153
+ " <progress value='1875' max='1875' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
154
+ " [1875/1875 14:06:36, Epoch 3/3]\n",
155
+ " </div>\n",
156
+ " <table border=\"1\" class=\"dataframe\">\n",
157
+ " <thead>\n",
158
+ " <tr style=\"text-align: left;\">\n",
159
+ " <th>Step</th>\n",
160
+ " <th>Training Loss</th>\n",
161
+ " <th>Validation Loss</th>\n",
162
+ " <th>Accuracy</th>\n",
163
+ " <th>F1</th>\n",
164
+ " <th>Precision</th>\n",
165
+ " <th>Recall</th>\n",
166
+ " </tr>\n",
167
+ " </thead>\n",
168
+ " <tbody>\n",
169
+ " <tr>\n",
170
+ " <td>50</td>\n",
171
+ " <td>0.688800</td>\n",
172
+ " <td>0.680938</td>\n",
173
+ " <td>0.661000</td>\n",
174
+ " <td>0.543742</td>\n",
175
+ " <td>0.792157</td>\n",
176
+ " <td>0.413934</td>\n",
177
+ " </tr>\n",
178
+ " <tr>\n",
179
+ " <td>100</td>\n",
180
+ " <td>0.629000</td>\n",
181
+ " <td>0.465259</td>\n",
182
+ " <td>0.841000</td>\n",
183
+ " <td>0.819113</td>\n",
184
+ " <td>0.920716</td>\n",
185
+ " <td>0.737705</td>\n",
186
+ " </tr>\n",
187
+ " <tr>\n",
188
+ " <td>150</td>\n",
189
+ " <td>0.371200</td>\n",
190
+ " <td>0.323407</td>\n",
191
+ " <td>0.868000</td>\n",
192
+ " <td>0.867470</td>\n",
193
+ " <td>0.850394</td>\n",
194
+ " <td>0.885246</td>\n",
195
+ " </tr>\n",
196
+ " <tr>\n",
197
+ " <td>200</td>\n",
198
+ " <td>0.336300</td>\n",
199
+ " <td>0.374150</td>\n",
200
+ " <td>0.857000</td>\n",
201
+ " <td>0.836197</td>\n",
202
+ " <td>0.948052</td>\n",
203
+ " <td>0.747951</td>\n",
204
+ " </tr>\n",
205
+ " <tr>\n",
206
+ " <td>250</td>\n",
207
+ " <td>0.336700</td>\n",
208
+ " <td>0.312763</td>\n",
209
+ " <td>0.865000</td>\n",
210
+ " <td>0.871795</td>\n",
211
+ " <td>0.812389</td>\n",
212
+ " <td>0.940574</td>\n",
213
+ " </tr>\n",
214
+ " <tr>\n",
215
+ " <td>300</td>\n",
216
+ " <td>0.311800</td>\n",
217
+ " <td>0.296506</td>\n",
218
+ " <td>0.889000</td>\n",
219
+ " <td>0.882540</td>\n",
220
+ " <td>0.912473</td>\n",
221
+ " <td>0.854508</td>\n",
222
+ " </tr>\n",
223
+ " <tr>\n",
224
+ " <td>350</td>\n",
225
+ " <td>0.309800</td>\n",
226
+ " <td>0.286319</td>\n",
227
+ " <td>0.886000</td>\n",
228
+ " <td>0.886228</td>\n",
229
+ " <td>0.863813</td>\n",
230
+ " <td>0.909836</td>\n",
231
+ " </tr>\n",
232
+ " <tr>\n",
233
+ " <td>400</td>\n",
234
+ " <td>0.272300</td>\n",
235
+ " <td>0.292773</td>\n",
236
+ " <td>0.890000</td>\n",
237
+ " <td>0.884696</td>\n",
238
+ " <td>0.905579</td>\n",
239
+ " <td>0.864754</td>\n",
240
+ " </tr>\n",
241
+ " <tr>\n",
242
+ " <td>450</td>\n",
243
+ " <td>0.315100</td>\n",
244
+ " <td>0.419856</td>\n",
245
+ " <td>0.854000</td>\n",
246
+ " <td>0.831019</td>\n",
247
+ " <td>0.954787</td>\n",
248
+ " <td>0.735656</td>\n",
249
+ " </tr>\n",
250
+ " <tr>\n",
251
+ " <td>500</td>\n",
252
+ " <td>0.350900</td>\n",
253
+ " <td>0.298303</td>\n",
254
+ " <td>0.862000</td>\n",
255
+ " <td>0.869565</td>\n",
256
+ " <td>0.807018</td>\n",
257
+ " <td>0.942623</td>\n",
258
+ " </tr>\n",
259
+ " <tr>\n",
260
+ " <td>550</td>\n",
261
+ " <td>0.355200</td>\n",
262
+ " <td>0.333094</td>\n",
263
+ " <td>0.870000</td>\n",
264
+ " <td>0.852608</td>\n",
265
+ " <td>0.954315</td>\n",
266
+ " <td>0.770492</td>\n",
267
+ " </tr>\n",
268
+ " <tr>\n",
269
+ " <td>600</td>\n",
270
+ " <td>0.279900</td>\n",
271
+ " <td>0.282081</td>\n",
272
+ " <td>0.887000</td>\n",
273
+ " <td>0.879915</td>\n",
274
+ " <td>0.913907</td>\n",
275
+ " <td>0.848361</td>\n",
276
+ " </tr>\n",
277
+ " <tr>\n",
278
+ " <td>650</td>\n",
279
+ " <td>0.279200</td>\n",
280
+ " <td>0.288312</td>\n",
281
+ " <td>0.892000</td>\n",
282
+ " <td>0.883621</td>\n",
283
+ " <td>0.931818</td>\n",
284
+ " <td>0.840164</td>\n",
285
+ " </tr>\n",
286
+ " <tr>\n",
287
+ " <td>700</td>\n",
288
+ " <td>0.198600</td>\n",
289
+ " <td>0.338301</td>\n",
290
+ " <td>0.876000</td>\n",
291
+ " <td>0.863736</td>\n",
292
+ " <td>0.931280</td>\n",
293
+ " <td>0.805328</td>\n",
294
+ " </tr>\n",
295
+ " <tr>\n",
296
+ " <td>750</td>\n",
297
+ " <td>0.195600</td>\n",
298
+ " <td>0.292916</td>\n",
299
+ " <td>0.897000</td>\n",
300
+ " <td>0.897512</td>\n",
301
+ " <td>0.872340</td>\n",
302
+ " <td>0.924180</td>\n",
303
+ " </tr>\n",
304
+ " <tr>\n",
305
+ " <td>800</td>\n",
306
+ " <td>0.243400</td>\n",
307
+ " <td>0.289307</td>\n",
308
+ " <td>0.899000</td>\n",
309
+ " <td>0.900883</td>\n",
310
+ " <td>0.864407</td>\n",
311
+ " <td>0.940574</td>\n",
312
+ " </tr>\n",
313
+ " <tr>\n",
314
+ " <td>850</td>\n",
315
+ " <td>0.193000</td>\n",
316
+ " <td>0.304464</td>\n",
317
+ " <td>0.897000</td>\n",
318
+ " <td>0.894359</td>\n",
319
+ " <td>0.895277</td>\n",
320
+ " <td>0.893443</td>\n",
321
+ " </tr>\n",
322
+ " <tr>\n",
323
+ " <td>900</td>\n",
324
+ " <td>0.214500</td>\n",
325
+ " <td>0.257609</td>\n",
326
+ " <td>0.899000</td>\n",
327
+ " <td>0.895337</td>\n",
328
+ " <td>0.905660</td>\n",
329
+ " <td>0.885246</td>\n",
330
+ " </tr>\n",
331
+ " <tr>\n",
332
+ " <td>950</td>\n",
333
+ " <td>0.228000</td>\n",
334
+ " <td>0.279465</td>\n",
335
+ " <td>0.887000</td>\n",
336
+ " <td>0.891659</td>\n",
337
+ " <td>0.837838</td>\n",
338
+ " <td>0.952869</td>\n",
339
+ " </tr>\n",
340
+ " <tr>\n",
341
+ " <td>1000</td>\n",
342
+ " <td>0.208100</td>\n",
343
+ " <td>0.230380</td>\n",
344
+ " <td>0.910000</td>\n",
345
+ " <td>0.908537</td>\n",
346
+ " <td>0.901210</td>\n",
347
+ " <td>0.915984</td>\n",
348
+ " </tr>\n",
349
+ " <tr>\n",
350
+ " <td>1050</td>\n",
351
+ " <td>0.200600</td>\n",
352
+ " <td>0.307765</td>\n",
353
+ " <td>0.901000</td>\n",
354
+ " <td>0.902077</td>\n",
355
+ " <td>0.871893</td>\n",
356
+ " <td>0.934426</td>\n",
357
+ " </tr>\n",
358
+ " <tr>\n",
359
+ " <td>1100</td>\n",
360
+ " <td>0.210600</td>\n",
361
+ " <td>0.278725</td>\n",
362
+ " <td>0.901000</td>\n",
363
+ " <td>0.901493</td>\n",
364
+ " <td>0.876209</td>\n",
365
+ " <td>0.928279</td>\n",
366
+ " </tr>\n",
367
+ " <tr>\n",
368
+ " <td>1150</td>\n",
369
+ " <td>0.208200</td>\n",
370
+ " <td>0.283095</td>\n",
371
+ " <td>0.912000</td>\n",
372
+ " <td>0.909836</td>\n",
373
+ " <td>0.909836</td>\n",
374
+ " <td>0.909836</td>\n",
375
+ " </tr>\n",
376
+ " <tr>\n",
377
+ " <td>1200</td>\n",
378
+ " <td>0.201000</td>\n",
379
+ " <td>0.256353</td>\n",
380
+ " <td>0.901000</td>\n",
381
+ " <td>0.895238</td>\n",
382
+ " <td>0.925602</td>\n",
383
+ " <td>0.866803</td>\n",
384
+ " </tr>\n",
385
+ " <tr>\n",
386
+ " <td>1250</td>\n",
387
+ " <td>0.186200</td>\n",
388
+ " <td>0.249205</td>\n",
389
+ " <td>0.909000</td>\n",
390
+ " <td>0.906282</td>\n",
391
+ " <td>0.910973</td>\n",
392
+ " <td>0.901639</td>\n",
393
+ " </tr>\n",
394
+ " <tr>\n",
395
+ " <td>1300</td>\n",
396
+ " <td>0.080400</td>\n",
397
+ " <td>0.367344</td>\n",
398
+ " <td>0.902000</td>\n",
399
+ " <td>0.900609</td>\n",
400
+ " <td>0.891566</td>\n",
401
+ " <td>0.909836</td>\n",
402
+ " </tr>\n",
403
+ " <tr>\n",
404
+ " <td>1350</td>\n",
405
+ " <td>0.152700</td>\n",
406
+ " <td>0.323376</td>\n",
407
+ " <td>0.905000</td>\n",
408
+ " <td>0.900315</td>\n",
409
+ " <td>0.922581</td>\n",
410
+ " <td>0.879098</td>\n",
411
+ " </tr>\n",
412
+ " <tr>\n",
413
+ " <td>1400</td>\n",
414
+ " <td>0.100400</td>\n",
415
+ " <td>0.416915</td>\n",
416
+ " <td>0.888000</td>\n",
417
+ " <td>0.891892</td>\n",
418
+ " <td>0.843066</td>\n",
419
+ " <td>0.946721</td>\n",
420
+ " </tr>\n",
421
+ " <tr>\n",
422
+ " <td>1450</td>\n",
423
+ " <td>0.108800</td>\n",
424
+ " <td>0.324885</td>\n",
425
+ " <td>0.908000</td>\n",
426
+ " <td>0.907258</td>\n",
427
+ " <td>0.892857</td>\n",
428
+ " <td>0.922131</td>\n",
429
+ " </tr>\n",
430
+ " <tr>\n",
431
+ " <td>1500</td>\n",
432
+ " <td>0.066700</td>\n",
433
+ " <td>0.378826</td>\n",
434
+ " <td>0.902000</td>\n",
435
+ " <td>0.901210</td>\n",
436
+ " <td>0.886905</td>\n",
437
+ " <td>0.915984</td>\n",
438
+ " </tr>\n",
439
+ " <tr>\n",
440
+ " <td>1550</td>\n",
441
+ " <td>0.078500</td>\n",
442
+ " <td>0.368980</td>\n",
443
+ " <td>0.906000</td>\n",
444
+ " <td>0.901674</td>\n",
445
+ " <td>0.920940</td>\n",
446
+ " <td>0.883197</td>\n",
447
+ " </tr>\n",
448
+ " <tr>\n",
449
+ " <td>1600</td>\n",
450
+ " <td>0.081500</td>\n",
451
+ " <td>0.364918</td>\n",
452
+ " <td>0.909000</td>\n",
453
+ " <td>0.907048</td>\n",
454
+ " <td>0.904277</td>\n",
455
+ " <td>0.909836</td>\n",
456
+ " </tr>\n",
457
+ " <tr>\n",
458
+ " <td>1650</td>\n",
459
+ " <td>0.062600</td>\n",
460
+ " <td>0.386855</td>\n",
461
+ " <td>0.905000</td>\n",
462
+ " <td>0.903943</td>\n",
463
+ " <td>0.892216</td>\n",
464
+ " <td>0.915984</td>\n",
465
+ " </tr>\n",
466
+ " <tr>\n",
467
+ " <td>1700</td>\n",
468
+ " <td>0.067000</td>\n",
469
+ " <td>0.392243</td>\n",
470
+ " <td>0.906000</td>\n",
471
+ " <td>0.905051</td>\n",
472
+ " <td>0.892430</td>\n",
473
+ " <td>0.918033</td>\n",
474
+ " </tr>\n",
475
+ " <tr>\n",
476
+ " <td>1750</td>\n",
477
+ " <td>0.047400</td>\n",
478
+ " <td>0.409893</td>\n",
479
+ " <td>0.910000</td>\n",
480
+ " <td>0.908350</td>\n",
481
+ " <td>0.902834</td>\n",
482
+ " <td>0.913934</td>\n",
483
+ " </tr>\n",
484
+ " <tr>\n",
485
+ " <td>1800</td>\n",
486
+ " <td>0.108200</td>\n",
487
+ " <td>0.401962</td>\n",
488
+ " <td>0.909000</td>\n",
489
+ " <td>0.907801</td>\n",
490
+ " <td>0.897796</td>\n",
491
+ " <td>0.918033</td>\n",
492
+ " </tr>\n",
493
+ " <tr>\n",
494
+ " <td>1850</td>\n",
495
+ " <td>0.105400</td>\n",
496
+ " <td>0.390589</td>\n",
497
+ " <td>0.912000</td>\n",
498
+ " <td>0.910020</td>\n",
499
+ " <td>0.908163</td>\n",
500
+ " <td>0.911885</td>\n",
501
+ " </tr>\n",
502
+ " </tbody>\n",
503
+ "</table><p>"
504
+ ],
505
+ "text/plain": [
506
+ "<IPython.core.display.HTML object>"
507
+ ]
508
+ },
509
+ "metadata": {},
510
+ "output_type": "display_data"
511
+ },
512
+ {
513
+ "name": "stderr",
514
+ "output_type": "stream",
515
+ "text": [
516
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
517
+ "***** Running Evaluation *****\n",
518
+ " Num examples = 1000\n",
519
+ " Batch size = 64\n",
520
+ "Saving model checkpoint to ./results\\checkpoint-50\n",
521
+ "Configuration saved in ./results\\checkpoint-50\\config.json\n",
522
+ "Model weights saved in ./results\\checkpoint-50\\pytorch_model.bin\n",
523
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
524
+ "***** Running Evaluation *****\n",
525
+ " Num examples = 1000\n",
526
+ " Batch size = 64\n",
527
+ "Saving model checkpoint to ./results\\checkpoint-100\n",
528
+ "Configuration saved in ./results\\checkpoint-100\\config.json\n",
529
+ "Model weights saved in ./results\\checkpoint-100\\pytorch_model.bin\n",
530
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
531
+ "***** Running Evaluation *****\n",
532
+ " Num examples = 1000\n",
533
+ " Batch size = 64\n",
534
+ "Saving model checkpoint to ./results\\checkpoint-150\n",
535
+ "Configuration saved in ./results\\checkpoint-150\\config.json\n",
536
+ "Model weights saved in ./results\\checkpoint-150\\pytorch_model.bin\n",
537
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
538
+ "***** Running Evaluation *****\n",
539
+ " Num examples = 1000\n",
540
+ " Batch size = 64\n",
541
+ "Saving model checkpoint to ./results\\checkpoint-200\n",
542
+ "Configuration saved in ./results\\checkpoint-200\\config.json\n",
543
+ "Model weights saved in ./results\\checkpoint-200\\pytorch_model.bin\n",
544
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
545
+ "***** Running Evaluation *****\n",
546
+ " Num examples = 1000\n",
547
+ " Batch size = 64\n",
548
+ "Saving model checkpoint to ./results\\checkpoint-250\n",
549
+ "Configuration saved in ./results\\checkpoint-250\\config.json\n",
550
+ "Model weights saved in ./results\\checkpoint-250\\pytorch_model.bin\n",
551
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
552
+ "***** Running Evaluation *****\n",
553
+ " Num examples = 1000\n",
554
+ " Batch size = 64\n",
555
+ "Saving model checkpoint to ./results\\checkpoint-300\n",
556
+ "Configuration saved in ./results\\checkpoint-300\\config.json\n",
557
+ "Model weights saved in ./results\\checkpoint-300\\pytorch_model.bin\n",
558
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
559
+ "***** Running Evaluation *****\n",
560
+ " Num examples = 1000\n",
561
+ " Batch size = 64\n",
562
+ "Saving model checkpoint to ./results\\checkpoint-350\n",
563
+ "Configuration saved in ./results\\checkpoint-350\\config.json\n",
564
+ "Model weights saved in ./results\\checkpoint-350\\pytorch_model.bin\n",
565
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
566
+ "***** Running Evaluation *****\n",
567
+ " Num examples = 1000\n",
568
+ " Batch size = 64\n",
569
+ "Saving model checkpoint to ./results\\checkpoint-400\n",
570
+ "Configuration saved in ./results\\checkpoint-400\\config.json\n",
571
+ "Model weights saved in ./results\\checkpoint-400\\pytorch_model.bin\n",
572
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
573
+ "***** Running Evaluation *****\n",
574
+ " Num examples = 1000\n",
575
+ " Batch size = 64\n",
576
+ "Saving model checkpoint to ./results\\checkpoint-450\n",
577
+ "Configuration saved in ./results\\checkpoint-450\\config.json\n",
578
+ "Model weights saved in ./results\\checkpoint-450\\pytorch_model.bin\n",
579
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
580
+ "***** Running Evaluation *****\n",
581
+ " Num examples = 1000\n",
582
+ " Batch size = 64\n",
583
+ "Saving model checkpoint to ./results\\checkpoint-500\n",
584
+ "Configuration saved in ./results\\checkpoint-500\\config.json\n",
585
+ "Model weights saved in ./results\\checkpoint-500\\pytorch_model.bin\n",
586
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
587
+ "***** Running Evaluation *****\n",
588
+ " Num examples = 1000\n",
589
+ " Batch size = 64\n",
590
+ "Saving model checkpoint to ./results\\checkpoint-550\n",
591
+ "Configuration saved in ./results\\checkpoint-550\\config.json\n",
592
+ "Model weights saved in ./results\\checkpoint-550\\pytorch_model.bin\n",
593
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
594
+ "***** Running Evaluation *****\n",
595
+ " Num examples = 1000\n",
596
+ " Batch size = 64\n",
597
+ "Saving model checkpoint to ./results\\checkpoint-600\n",
598
+ "Configuration saved in ./results\\checkpoint-600\\config.json\n",
599
+ "Model weights saved in ./results\\checkpoint-600\\pytorch_model.bin\n",
600
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
601
+ "***** Running Evaluation *****\n",
602
+ " Num examples = 1000\n",
603
+ " Batch size = 64\n",
604
+ "Saving model checkpoint to ./results\\checkpoint-650\n",
605
+ "Configuration saved in ./results\\checkpoint-650\\config.json\n",
606
+ "Model weights saved in ./results\\checkpoint-650\\pytorch_model.bin\n",
607
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
608
+ "***** Running Evaluation *****\n",
609
+ " Num examples = 1000\n",
610
+ " Batch size = 64\n",
611
+ "Saving model checkpoint to ./results\\checkpoint-700\n",
612
+ "Configuration saved in ./results\\checkpoint-700\\config.json\n",
613
+ "Model weights saved in ./results\\checkpoint-700\\pytorch_model.bin\n",
614
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
615
+ "***** Running Evaluation *****\n",
616
+ " Num examples = 1000\n",
617
+ " Batch size = 64\n",
618
+ "Saving model checkpoint to ./results\\checkpoint-750\n",
619
+ "Configuration saved in ./results\\checkpoint-750\\config.json\n",
620
+ "Model weights saved in ./results\\checkpoint-750\\pytorch_model.bin\n",
621
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
622
+ "***** Running Evaluation *****\n",
623
+ " Num examples = 1000\n",
624
+ " Batch size = 64\n",
625
+ "Saving model checkpoint to ./results\\checkpoint-800\n",
626
+ "Configuration saved in ./results\\checkpoint-800\\config.json\n"
627
+ ]
628
+ },
629
+ {
630
+ "name": "stderr",
631
+ "output_type": "stream",
632
+ "text": [
633
+ "Model weights saved in ./results\\checkpoint-800\\pytorch_model.bin\n",
634
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
635
+ "***** Running Evaluation *****\n",
636
+ " Num examples = 1000\n",
637
+ " Batch size = 64\n",
638
+ "Saving model checkpoint to ./results\\checkpoint-850\n",
639
+ "Configuration saved in ./results\\checkpoint-850\\config.json\n",
640
+ "Model weights saved in ./results\\checkpoint-850\\pytorch_model.bin\n",
641
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
642
+ "***** Running Evaluation *****\n",
643
+ " Num examples = 1000\n",
644
+ " Batch size = 64\n",
645
+ "Saving model checkpoint to ./results\\checkpoint-900\n",
646
+ "Configuration saved in ./results\\checkpoint-900\\config.json\n",
647
+ "Model weights saved in ./results\\checkpoint-900\\pytorch_model.bin\n",
648
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
649
+ "***** Running Evaluation *****\n",
650
+ " Num examples = 1000\n",
651
+ " Batch size = 64\n",
652
+ "Saving model checkpoint to ./results\\checkpoint-950\n",
653
+ "Configuration saved in ./results\\checkpoint-950\\config.json\n",
654
+ "Model weights saved in ./results\\checkpoint-950\\pytorch_model.bin\n",
655
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
656
+ "***** Running Evaluation *****\n",
657
+ " Num examples = 1000\n",
658
+ " Batch size = 64\n",
659
+ "Saving model checkpoint to ./results\\checkpoint-1000\n",
660
+ "Configuration saved in ./results\\checkpoint-1000\\config.json\n",
661
+ "Model weights saved in ./results\\checkpoint-1000\\pytorch_model.bin\n",
662
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
663
+ "***** Running Evaluation *****\n",
664
+ " Num examples = 1000\n",
665
+ " Batch size = 64\n",
666
+ "Saving model checkpoint to ./results\\checkpoint-1050\n",
667
+ "Configuration saved in ./results\\checkpoint-1050\\config.json\n",
668
+ "Model weights saved in ./results\\checkpoint-1050\\pytorch_model.bin\n",
669
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
670
+ "***** Running Evaluation *****\n",
671
+ " Num examples = 1000\n",
672
+ " Batch size = 64\n",
673
+ "Saving model checkpoint to ./results\\checkpoint-1100\n",
674
+ "Configuration saved in ./results\\checkpoint-1100\\config.json\n",
675
+ "Model weights saved in ./results\\checkpoint-1100\\pytorch_model.bin\n",
676
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
677
+ "***** Running Evaluation *****\n",
678
+ " Num examples = 1000\n",
679
+ " Batch size = 64\n",
680
+ "Saving model checkpoint to ./results\\checkpoint-1150\n",
681
+ "Configuration saved in ./results\\checkpoint-1150\\config.json\n",
682
+ "Model weights saved in ./results\\checkpoint-1150\\pytorch_model.bin\n",
683
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
684
+ "***** Running Evaluation *****\n",
685
+ " Num examples = 1000\n",
686
+ " Batch size = 64\n",
687
+ "Saving model checkpoint to ./results\\checkpoint-1200\n",
688
+ "Configuration saved in ./results\\checkpoint-1200\\config.json\n",
689
+ "Model weights saved in ./results\\checkpoint-1200\\pytorch_model.bin\n",
690
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
691
+ "***** Running Evaluation *****\n",
692
+ " Num examples = 1000\n",
693
+ " Batch size = 64\n",
694
+ "Saving model checkpoint to ./results\\checkpoint-1250\n",
695
+ "Configuration saved in ./results\\checkpoint-1250\\config.json\n",
696
+ "Model weights saved in ./results\\checkpoint-1250\\pytorch_model.bin\n",
697
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
698
+ "***** Running Evaluation *****\n",
699
+ " Num examples = 1000\n",
700
+ " Batch size = 64\n",
701
+ "Saving model checkpoint to ./results\\checkpoint-1300\n",
702
+ "Configuration saved in ./results\\checkpoint-1300\\config.json\n",
703
+ "Model weights saved in ./results\\checkpoint-1300\\pytorch_model.bin\n",
704
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
705
+ "***** Running Evaluation *****\n",
706
+ " Num examples = 1000\n",
707
+ " Batch size = 64\n",
708
+ "Saving model checkpoint to ./results\\checkpoint-1350\n",
709
+ "Configuration saved in ./results\\checkpoint-1350\\config.json\n",
710
+ "Model weights saved in ./results\\checkpoint-1350\\pytorch_model.bin\n",
711
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
712
+ "***** Running Evaluation *****\n",
713
+ " Num examples = 1000\n",
714
+ " Batch size = 64\n",
715
+ "Saving model checkpoint to ./results\\checkpoint-1400\n",
716
+ "Configuration saved in ./results\\checkpoint-1400\\config.json\n",
717
+ "Model weights saved in ./results\\checkpoint-1400\\pytorch_model.bin\n",
718
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
719
+ "***** Running Evaluation *****\n",
720
+ " Num examples = 1000\n",
721
+ " Batch size = 64\n",
722
+ "Saving model checkpoint to ./results\\checkpoint-1450\n",
723
+ "Configuration saved in ./results\\checkpoint-1450\\config.json\n",
724
+ "Model weights saved in ./results\\checkpoint-1450\\pytorch_model.bin\n",
725
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
726
+ "***** Running Evaluation *****\n",
727
+ " Num examples = 1000\n",
728
+ " Batch size = 64\n",
729
+ "Saving model checkpoint to ./results\\checkpoint-1500\n",
730
+ "Configuration saved in ./results\\checkpoint-1500\\config.json\n",
731
+ "Model weights saved in ./results\\checkpoint-1500\\pytorch_model.bin\n",
732
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
733
+ "***** Running Evaluation *****\n",
734
+ " Num examples = 1000\n",
735
+ " Batch size = 64\n",
736
+ "Saving model checkpoint to ./results\\checkpoint-1550\n",
737
+ "Configuration saved in ./results\\checkpoint-1550\\config.json\n",
738
+ "Model weights saved in ./results\\checkpoint-1550\\pytorch_model.bin\n",
739
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
740
+ "***** Running Evaluation *****\n",
741
+ " Num examples = 1000\n"
742
+ ]
743
+ },
744
+ {
745
+ "name": "stderr",
746
+ "output_type": "stream",
747
+ "text": [
748
+ " Batch size = 64\n",
749
+ "Saving model checkpoint to ./results\\checkpoint-1600\n",
750
+ "Configuration saved in ./results\\checkpoint-1600\\config.json\n",
751
+ "Model weights saved in ./results\\checkpoint-1600\\pytorch_model.bin\n",
752
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
753
+ "***** Running Evaluation *****\n",
754
+ " Num examples = 1000\n",
755
+ " Batch size = 64\n",
756
+ "Saving model checkpoint to ./results\\checkpoint-1650\n",
757
+ "Configuration saved in ./results\\checkpoint-1650\\config.json\n",
758
+ "Model weights saved in ./results\\checkpoint-1650\\pytorch_model.bin\n",
759
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
760
+ "***** Running Evaluation *****\n",
761
+ " Num examples = 1000\n",
762
+ " Batch size = 64\n",
763
+ "Saving model checkpoint to ./results\\checkpoint-1700\n",
764
+ "Configuration saved in ./results\\checkpoint-1700\\config.json\n",
765
+ "Model weights saved in ./results\\checkpoint-1700\\pytorch_model.bin\n",
766
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
767
+ "***** Running Evaluation *****\n",
768
+ " Num examples = 1000\n",
769
+ " Batch size = 64\n",
770
+ "Saving model checkpoint to ./results\\checkpoint-1750\n",
771
+ "Configuration saved in ./results\\checkpoint-1750\\config.json\n",
772
+ "Model weights saved in ./results\\checkpoint-1750\\pytorch_model.bin\n",
773
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
774
+ "***** Running Evaluation *****\n",
775
+ " Num examples = 1000\n",
776
+ " Batch size = 64\n",
777
+ "Saving model checkpoint to ./results\\checkpoint-1800\n",
778
+ "Configuration saved in ./results\\checkpoint-1800\\config.json\n",
779
+ "Model weights saved in ./results\\checkpoint-1800\\pytorch_model.bin\n",
780
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
781
+ "***** Running Evaluation *****\n",
782
+ " Num examples = 1000\n",
783
+ " Batch size = 64\n",
784
+ "Saving model checkpoint to ./results\\checkpoint-1850\n",
785
+ "Configuration saved in ./results\\checkpoint-1850\\config.json\n",
786
+ "Model weights saved in ./results\\checkpoint-1850\\pytorch_model.bin\n",
787
+ "\n",
788
+ "\n",
789
+ "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
790
+ "\n",
791
+ "\n",
792
+ "Loading best model from ./results\\checkpoint-1000 (score: 0.23037973046302795).\n"
793
+ ]
794
+ },
795
+ {
796
+ "data": {
797
+ "text/plain": [
798
+ "TrainOutput(global_step=1875, training_loss=0.22492422332763673, metrics={'train_runtime': 50814.837, 'train_samples_per_second': 0.59, 'train_steps_per_second': 0.037, 'total_flos': 3974021959680000.0, 'train_loss': 0.22492422332763673, 'epoch': 3.0})"
799
+ ]
800
+ },
801
+ "execution_count": 7,
802
+ "metadata": {},
803
+ "output_type": "execute_result"
804
+ }
805
+ ],
806
+ "source": [
807
+ "trainer = Trainer(\n",
808
+ " model=model,\n",
809
+ " args=training_args,\n",
810
+ " train_dataset=train_dataset,\n",
811
+ " eval_dataset=test_dataset,\n",
812
+ " compute_metrics=compute_metrics,\n",
813
+ ")\n",
814
+ "\n",
815
+ "trainer.train()\n"
816
+ ]
817
+ },
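
The run above keeps the checkpoint with the lowest evaluation loss (checkpoint-1000) because `metric_for_best_model` is left at its default; a variant sketch, not part of this commit, that ranks checkpoints by the F1 score returned by `compute_metrics` instead:

```python
# Variant sketch (assumption: same transformers version as the run above, hence
# `evaluation_strategy`): select the best checkpoint by F1 rather than eval loss.
best_by_f1_args = TrainingArguments(
    output_dir="./results_f1",
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    evaluation_strategy="steps",
    save_strategy="steps",
    logging_steps=50,
    save_steps=50,
    load_best_model_at_end=True,
    metric_for_best_model="f1",
    greater_is_better=True,
)
```
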
818
+ {
819
+ "cell_type": "code",
820
+ "execution_count": 8,
821
+ "id": "e2b3a88e",
822
+ "metadata": {},
823
+ "outputs": [
824
+ {
825
+ "name": "stderr",
826
+ "output_type": "stream",
827
+ "text": [
828
+ "The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: text. If text are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.\n",
829
+ "***** Running Evaluation *****\n",
830
+ " Num examples = 1000\n",
831
+ " Batch size = 64\n"
832
+ ]
833
+ },
834
+ {
835
+ "data": {
836
+ "text/html": [
837
+ "\n",
838
+ " <div>\n",
839
+ " \n",
840
+ " <progress value='16' max='16' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
841
+ " [16/16 07:01]\n",
842
+ " </div>\n",
843
+ " "
844
+ ],
845
+ "text/plain": [
846
+ "<IPython.core.display.HTML object>"
847
+ ]
848
+ },
849
+ "metadata": {},
850
+ "output_type": "display_data"
851
+ },
852
+ {
853
+ "data": {
854
+ "text/plain": [
855
+ "{'eval_loss': 0.23037973046302795,\n",
856
+ " 'eval_accuracy': 0.91,\n",
857
+ " 'eval_f1': 0.9085365853658537,\n",
858
+ " 'eval_precision': 0.9012096774193549,\n",
859
+ " 'eval_recall': 0.9159836065573771,\n",
860
+ " 'eval_runtime': 450.0402,\n",
861
+ " 'eval_samples_per_second': 2.222,\n",
862
+ " 'eval_steps_per_second': 0.036,\n",
863
+ " 'epoch': 3.0}"
864
+ ]
865
+ },
866
+ "execution_count": 8,
867
+ "metadata": {},
868
+ "output_type": "execute_result"
869
+ }
870
+ ],
871
+ "source": [
872
+ "trainer.evaluate()"
873
+ ]
874
+ },
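
Beyond the aggregate metrics above, the trained `Trainer` can also return per-example logits; a short sketch assuming the `trainer` and `test_dataset` objects defined earlier:

```python
# Prediction sketch (assumes `trainer` and `test_dataset` from the cells above).
pred_output = trainer.predict(test_dataset)
print(pred_output.metrics)            # same metrics, keyed as test_* by default
print(pred_output.predictions.shape)  # (1000, 2) logits for the 1,000-example subset
```
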
875
+ {
876
+ "cell_type": "code",
877
+ "execution_count": 9,
878
+ "id": "a15f4208",
879
+ "metadata": {},
880
+ "outputs": [
881
+ {
882
+ "name": "stderr",
883
+ "output_type": "stream",
884
+ "text": [
885
+ "Configuration saved in ./saved_model\\config.json\n",
886
+ "Model weights saved in ./saved_model\\pytorch_model.bin\n",
887
+ "tokenizer config file saved in ./saved_model\\tokenizer_config.json\n",
888
+ "Special tokens file saved in ./saved_model\\special_tokens_map.json\n"
889
+ ]
890
+ },
891
+ {
892
+ "data": {
893
+ "text/plain": [
894
+ "('./saved_model\\\\tokenizer_config.json',\n",
895
+ " './saved_model\\\\special_tokens_map.json',\n",
896
+ " './saved_model\\\\vocab.txt',\n",
897
+ " './saved_model\\\\added_tokens.json',\n",
898
+ " './saved_model\\\\tokenizer.json')"
899
+ ]
900
+ },
901
+ "execution_count": 9,
902
+ "metadata": {},
903
+ "output_type": "execute_result"
904
+ }
905
+ ],
906
+ "source": [
907
+ "model.save_pretrained('./saved_model')\n",
908
+ "tokenizer.save_pretrained('./saved_model')"
909
+ ]
910
+ },
911
+ {
912
+ "cell_type": "code",
913
+ "execution_count": 10,
914
+ "id": "eb978982",
915
+ "metadata": {},
916
+ "outputs": [
917
+ {
918
+ "name": "stdout",
919
+ "output_type": "stream",
920
+ "text": [
921
+ "positive\n"
922
+ ]
923
+ }
924
+ ],
925
+ "source": [
926
+ "def predict_sentiment(text):\n",
927
+ " inputs = tokenizer(text, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n",
928
+ " inputs = {k: v.to(model.device) for k, v in inputs.items()}\n",
929
+ " with torch.no_grad():\n",
930
+ " logits = model(**inputs).logits\n",
931
+ " prediction = logits.argmax(-1).item()\n",
932
+ " return 'positive' if prediction == 1 else 'negative'\n",
933
+ "\n",
934
+ "# Test with a new sentence\n",
935
+ "print(predict_sentiment(\"This movie was great! I loved it.\"))\n"
936
+ ]
937
+ },
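
For inference outside this notebook, the directory written by `save_pretrained` above can be loaded back through the `pipeline` API; a sketch assuming `./saved_model` exists (without an `id2label` mapping the labels come back as LABEL_0/LABEL_1, i.e. negative/positive here):

```python
# Inference sketch from the saved directory (assumption: './saved_model' from the earlier cell).
from transformers import pipeline

clf = pipeline("text-classification", model="./saved_model", tokenizer="./saved_model")
print(clf("This movie was great! I loved it."))
# e.g. [{'label': 'LABEL_1', 'score': 0.99...}]  -> LABEL_1 corresponds to positive here
```
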
938
+ {
939
+ "cell_type": "code",
940
+ "execution_count": 3,
941
+ "id": "30dac866",
942
+ "metadata": {},
943
+ "outputs": [
944
+ {
945
+ "ename": "NameError",
946
+ "evalue": "name 'model' is not defined",
947
+ "output_type": "error",
948
+ "traceback": [
949
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
950
+ "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
951
+ "Input \u001b[1;32mIn [3]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241m.\u001b[39msave_pretrained(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m./Sentimental_Analysis\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m 2\u001b[0m tokenizer\u001b[38;5;241m.\u001b[39msave_pretrained(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m./Sentimental_Analysis\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
952
+ "\u001b[1;31mNameError\u001b[0m: name 'model' is not defined"
953
+ ]
954
+ }
955
+ ],
956
+ "source": [
957
+ "model.save_pretrained('./Sentimental_Analysis')\n",
958
+ "tokenizer.save_pretrained('./Sentimental_Analysis')\n"
959
+ ]
960
+ },
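
The `NameError` above comes from running this cell in a fresh kernel where `model` and `tokenizer` are no longer defined; a fix sketch that reloads the fine-tuned weights from the `./saved_model` directory written earlier before saving them under the new name:

```python
# Fix sketch for the NameError above (assumption: the './saved_model' directory
# from the earlier save cell still exists on disk).
from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification

model = DistilBertForSequenceClassification.from_pretrained('./saved_model')
tokenizer = DistilBertTokenizerFast.from_pretrained('./saved_model')
model.save_pretrained('./Sentimental_Analysis')
tokenizer.save_pretrained('./Sentimental_Analysis')
```
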
961
+ {
962
+ "cell_type": "code",
963
+ "execution_count": null,
964
+ "id": "f3b53c73",
965
+ "metadata": {},
966
+ "outputs": [],
967
+ "source": []
968
+ }
969
+ ],
970
+ "metadata": {
971
+ "kernelspec": {
972
+ "display_name": "Python 3 (ipykernel)",
973
+ "language": "python",
974
+ "name": "python3"
975
+ },
976
+ "language_info": {
977
+ "codemirror_mode": {
978
+ "name": "ipython",
979
+ "version": 3
980
+ },
981
+ "file_extension": ".py",
982
+ "mimetype": "text/x-python",
983
+ "name": "python",
984
+ "nbconvert_exporter": "python",
985
+ "pygments_lexer": "ipython3",
986
+ "version": "3.10.4"
987
+ }
988
+ },
989
+ "nbformat": 4,
990
+ "nbformat_minor": 5
991
+ }