Commit 0a3f17c
Parent(s): 2468d2a
Create dataset.py
dataset.py +44 -0
dataset.py
ADDED
@@ -0,0 +1,44 @@
import json

import datasets
from datasets import DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator


class CustomJSONLDataset(GeneratorBasedBuilder):
    """Loads a custom dataset from local JSONL files."""

    VERSION = datasets.Version("1.0.0")

    def _info(self) -> DatasetInfo:
        return DatasetInfo(
            description="Custom dataset from local JSONL files.",
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        urls = [f"output_json_{i}.jsonl" for i in range(2, 3)]

        # download_and_extract accepts a list of files and will
        # download them in parallel if possible.
        paths = dl_manager.download_and_extract(urls)

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepaths": paths},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yields (key, example) tuples from the dataset."""
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    data = json.loads(line.strip())
                    # Prefix the key with the file path so keys stay
                    # unique when more than one file is read.
                    yield f"{filepath}-{idx}", {"text": data["text"]}


# The following lines are useful for testing locally, to ensure the script
# works as expected. They're not necessary once the script is integrated
# into a Hugging Face Datasets repository.
if __name__ == "__main__":
    from datasets import load_dataset

    # This will use the custom dataset loading script.
    dataset = load_dataset("path_to_this_script.py")
    print(dataset["train"][0])
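The script expects output_json_2.jsonl to already exist next to it, with one JSON object per line carrying a "text" field. A minimal local smoke test is sketched below; the file contents are hypothetical, and it assumes the builder script is saved as dataset.py in the current directory:

# Sketch of a local smoke test (hypothetical data and filenames).
import json

# Write the single file the builder's _split_generators expects.
with open("output_json_2.jsonl", "w", encoding="utf-8") as f:
    for text in ["First example sentence.", "Second example sentence."]:
        f.write(json.dumps({"text": text}) + "\n")

from datasets import load_dataset

dataset = load_dataset("dataset.py")
print(dataset["train"][0])  # {'text': 'First example sentence.'}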