christti committed on
Commit 5b858ef · 1 Parent(s): 30d527c

Added classes for multi-label classification

Files changed (1):
    coco_dataset_multi_label_script.py  +139 -15

coco_dataset_multi_label_script.py CHANGED
@@ -1,6 +1,7 @@
 import json
 import os
 import datasets
+import torch
 
 
 class COCOBuilderConfig(datasets.BuilderConfig):
@@ -81,6 +82,8 @@ class COCODataset(datasets.GeneratorBasedBuilder):
             "file_name": datasets.Value("string"),
             "coco_url": datasets.Value("string"),
             "image_path": datasets.Value("string"),
+            "category_ids": datasets.Sequence(datasets.Value("int64")),
+            "category_one_hot": datasets.Sequence(datasets.Value("int64")),
         }
 
         features = datasets.Features(feature_dict)
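(For orientation: with the two added features, a loaded example would look roughly like the sketch below. All values are invented for illustration; COCO 2017 has 80 instance categories, with ids that are not contiguous.)

    # Hypothetical example row after this change (values invented):
    example = {
        "image_id": 139,
        "caption_id": 1,
        "caption": "a person standing next to a car",
        "height": 426,
        "width": 640,
        "file_name": "000000000139.jpg",
        "coco_url": "http://images.cocodataset.org/val2017/000000000139.jpg",
        "image_path": "/data/mscoco/val2017/000000000139.jpg",
        "category_ids": [1, 3],                    # every instance category present in the image
        "category_one_hot": [1, 0, 1] + [0] * 77,  # multi-hot vector over the 80-entry category list
    }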
@@ -120,7 +123,6 @@ class COCODataset(datasets.GeneratorBasedBuilder):
             "annotations_trainval": os.path.join(data_dir, "annotations_trainval2017.zip"),
             "image_info_test": os.path.join(data_dir, "image_info_test2017.zip"),
         }
-        archive_path = dl_manager.download_and_extract(_DL_URLS)
 
         splits = []
         for split in self.config.splits:
@@ -129,8 +131,9 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_train2017.json"),
-                    "image_dir": os.path.join(archive_path["train"], "train2017"),
+                    "captions_json_path": os.path.join(data_dir, "annotations", "captions_train2017.json"),
+                    "instances_json_path": os.path.join(data_dir, "annotations", "instances_train2017.json"),
+                    "image_dir": os.path.join(data_dir, "train2017"),
                     "split": "train",
                 }
             )
@@ -139,8 +142,9 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "json_path": os.path.join(archive_path["annotations_trainval"], "annotations", "captions_val2017.json"),
-                    "image_dir": os.path.join(archive_path["val"], "val2017"),
+                    "captions_json_path": os.path.join(data_dir, "annotations", "captions_val2017.json"),
+                    "instances_json_path": os.path.join(data_dir, "annotations", "instances_val2017.json"),
+                    "image_dir": os.path.join(data_dir, "val2017"),
                     "split": "valid",
                 },
             )
@@ -149,8 +153,9 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "json_path": os.path.join(archive_path["image_info_test"], "annotations", "image_info_test2017.json"),
-                    "image_dir": os.path.join(archive_path["test"], "test2017"),
+                    "captions_json_path": os.path.join(data_dir, "annotations", "image_info_test2017.json"),
+                    "instances_json_path": os.path.join(data_dir, "annotations", "image_info_test2017.json"),  # "instances_test2017.json"
+                    "image_dir": os.path.join(data_dir, "test2017"),
                     "split": "test",
                 },
             )
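(Since `dl_manager.download_and_extract` was removed in the hunk above, the archives are assumed to be pre-extracted; the gen_kwargs paths only resolve if `data_dir` already has roughly this layout, a sketch inferred from the paths in this diff:)

    data_dir/
    ├── annotations/
    │   ├── captions_train2017.json
    │   ├── captions_val2017.json
    │   ├── instances_train2017.json
    │   ├── instances_val2017.json
    │   └── image_info_test2017.json
    ├── train2017/
    ├── val2017/
    └── test2017/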
@@ -160,42 +165,150 @@ class COCODataset(datasets.GeneratorBasedBuilder):
             splits.append(dataset)
 
         return splits
+
+    # instances.json
+    # {
+    #     "info": {
+    #         "year": "2020",
+    #         "version": "1",
+    #         "description": "Exported from roboflow.ai",
+    #         "contributor": "Roboflow",
+    #         "url": "https://app.roboflow.ai/datasets/hard-hat-sample/1",
+    #         "date_created": "2000-01-01T00:00:00+00:00"
+    #     },
+    #     "licenses": [
+    #         {
+    #             "id": 1,
+    #             "url": "https://creativecommons.org/publicdomain/zero/1.0/",
+    #             "name": "Public Domain"
+    #         }
+    #     ],
+    #     "categories": [
+    #         {
+    #             "id": 0,
+    #             "name": "Workers",
+    #             "supercategory": "none"
+    #         },
+    #         {
+    #             "id": 1,
+    #             "name": "head",
+    #             "supercategory": "Workers"
+    #         },
+    #         {
+    #             "id": 2,
+    #             "name": "helmet",
+    #             "supercategory": "Workers"
+    #         },
+    #         {
+    #             "id": 3,
+    #             "name": "person",
+    #             "supercategory": "Workers"
+    #         }
+    #     ],
+    #     "images": [
+    #         {
+    #             "id": 0,
+    #             "license": 1,
+    #             "file_name": "0001.jpg",
+    #             "height": 275,
+    #             "width": 490,
+    #             "date_captured": "2020-07-20T19:39:26+00:00"
+    #         }
+    #     ],
+    #     "annotations": [
+    #         {
+    #             "id": 0,
+    #             "image_id": 0,
+    #             "category_id": 2,
+    #             "bbox": [
+    #                 45,
+    #                 2,
+    #                 85,
+    #                 85
+    #             ],
+    #             "area": 7225,
+    #             "segmentation": [],
+    #             "iscrowd": 0
+    #         },
+    #         {
+    #             "id": 1,
+    #             "image_id": 0,
+    #             "category_id": 2,
+    #             "bbox": [
+    #                 324,
+    #                 29,
+    #                 72,
+    #                 81
+    #             ],
+    #             "area": 5832,
+    #             "segmentation": [],
+    #             "iscrowd": 0
+    #         }
+    #     ]
+    # }
 
     def _generate_examples(
         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-        self, json_path, image_dir, split
+        self, captions_json_path, instances_json_path, image_dir, split
     ):
-        """ Yields examples as (key, example) tuples. """
+        """ Yields examples as (key, example) tuples; each example also carries category_ids and category_one_hot. """
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
-        _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id"]
+        _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id", "category_ids", "category_one_hot"]
         features = list(_features)
 
         if split in "valid":
             split = "val"
 
-        with open(json_path, 'r', encoding='UTF-8') as fp:
-            data = json.load(fp)
+        with open(captions_json_path, 'r', encoding='UTF-8') as fp:
+            captions_data = json.load(fp)
+
+        with open(instances_json_path, 'r', encoding='UTF-8') as fp:
+            instances_data = json.load(fp)
 
         # list of dict
-        images = data["images"]
+        images = captions_data["images"]
+        instances_annotations = instances_data["annotations"]
         entries = images
 
+        self.classes = list(map(lambda x: {'id': x['id'], 'name': x['name']}, instances_data['categories']))
+        self.num_classes = len(self.classes)
+
         # build a dict of image_id -> image info dict
         d = {image["id"]: image for image in images}
 
+        # build a dict of image_id -> list of category_ids
+        cat_ids_dict = {}
+
+        for annotation in instances_annotations:
+            image_id = annotation["image_id"]
+            category_id = annotation["category_id"]
+            if image_id not in cat_ids_dict:
+                cat_ids_dict[image_id] = set([])
+            cat_ids_dict[image_id].add(category_id)
+
         # list of dict
         if split in ["train", "val"]:
-            annotations = data["annotations"]
+            annotations = captions_data["annotations"]
 
             # build a dict of image_id ->
             for annotation in annotations:
                 _id = annotation["id"]
-                image_info = d[annotation["image_id"]]
+                image_id = annotation["image_id"]
+                image_info = d[image_id]
                 annotation.update(image_info)
                 annotation["id"] = _id
 
+                # Add the category_ids to the annotation
+                annotation["category_ids"] = cat_ids_dict[annotation["image_id"]] if annotation["image_id"] in cat_ids_dict else []
+
+                annotation['category_one_hot'] = torch.zeros(len(self.classes))
+                for category_id in annotation["category_ids"]:
+                    # Get index of category_id in self.classes
+                    index = next((index for (index, d) in enumerate(self.classes) if d["id"] == category_id), None)
+                    annotation['category_one_hot'][index] = 1
+
             entries = annotations
 
         for id_, entry in enumerate(entries):
@@ -213,3 +326,14 @@ class COCODataset(datasets.GeneratorBasedBuilder):
             entry = {k: entry[k] for k in _features if k in entry}
 
             yield str((entry["image_id"], entry["caption_id"])), entry
+
+from datasets import load_dataset
+if __name__ == "__main__":
+    dataset = load_dataset(
+        "coco_dataset_multi_label_script/coco_dataset_multi_label_script.py",
+        "2017",
+        keep_in_memory=False,
+        splits=["valid"],
+        data_dir="/workspace/pixt/clip-training/data/mscoco",
+    )
+    print(dataset["validation"][0])
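Two details of the new label-building code are worth flagging. `category_ids` is stored as a Python set and `category_one_hot` as a float32 torch.Tensor, while both features are declared as datasets.Sequence(datasets.Value("int64")); Arrow serialization generally expects plain lists of ints. Also, `index` comes back as None when a category_id is missing from self.classes, which would raise on assignment. A defensive sketch of the same logic (assuming the same self.classes and cat_ids_dict as in the diff; this is not what the commit ships):

    # Plain-int construction of the two new fields, per caption annotation.
    category_ids = sorted(cat_ids_dict.get(annotation["image_id"], set()))
    annotation["category_ids"] = category_ids

    # Map category id -> position once, instead of a linear scan per id.
    id_to_index = {c["id"]: i for i, c in enumerate(self.classes)}
    one_hot = [0] * len(self.classes)
    for category_id in category_ids:
        index = id_to_index.get(category_id)
        if index is not None:  # skip ids absent from the category list
            one_hot[index] = 1
    annotation["category_one_hot"] = one_hot  # int list matches the declared int64 Sequence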
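Downstream, the stored int sequence can be turned back into a float tensor per example for a multi-label objective. A usage sketch (assumes the `dataset` object from the test block above; BCEWithLogitsLoss is the standard pairing for multi-hot targets):

    import torch

    example = dataset["validation"][0]
    target = torch.tensor(example["category_one_hot"], dtype=torch.float32)
    # Pair `target` with model logits of the same length, e.g.:
    # loss = torch.nn.BCEWithLogitsLoss()(logits, target)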