carsonzhang committed
Commit 84bbc46 · verified · 1 Parent(s): 6d099d4

Upload 3 files

01-download_melanoma.R ADDED
@@ -0,0 +1,36 @@
+ library(here)
+
+ training_jpeg_images_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Training_JPEG.zip"
+ training_metadata_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Training_GroundTruth.csv"
+ training_metadata_v2_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Training_GroundTruth_v2.csv"
+ training_duplicate_image_list_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Training_Duplicates.csv"
+
+ test_jpeg_images_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Test_JPEG.zip"
+ test_metadata_url = "https://isic-challenge-data.s3.amazonaws.com/2020/ISIC_2020_Test_Metadata.csv"
+
+ urls = c(
+   training_jpeg_images_url,
+   training_metadata_url, training_metadata_v2_url, training_duplicate_image_list_url,
+   test_jpeg_images_url,
+   test_metadata_url
+ )
+
+ cache_dir = here("cache")
+ if (!dir.exists(cache_dir)) dir.create(cache_dir)
+
+ # download a single file into the cache; the archives are large, so raise the
+ # download timeout and restore the old setting on exit
+ download_melanoma_file = function(url) {
+   op = options(timeout = 36000)
+   on.exit(options(op))
+
+   # cache_dir is already an absolute path, so build the target with file.path(), not here()
+   download.file(url, file.path(cache_dir, basename(url)))
+ }
+
+ mlr3misc::walk(urls, download_melanoma_file)
+
+ # extract the image archives into the cache so that the later scripts find
+ # cache/train and cache/ISIC_2020_Test_Input
+ unzip(file.path(cache_dir, basename(training_jpeg_images_url)), exdir = cache_dir)
+ unzip(file.path(cache_dir, basename(test_jpeg_images_url)), exdir = cache_dir)
02-resize_melanoma.py ADDED
@@ -0,0 +1,33 @@
+ import os
+
+ import torchvision
+ from tqdm import tqdm
+
+ PATH_TO_MLR3TORCH = "."
+ cache_dir = "cache"
+
+ path_to_melanoma_train = os.path.join(PATH_TO_MLR3TORCH, cache_dir, "train")
+ path_to_melanoma_test = os.path.join(PATH_TO_MLR3TORCH, cache_dir, "ISIC_2020_Test_Input")
+
+ path_to_output_train = os.path.join(PATH_TO_MLR3TORCH, cache_dir, "hf_dataset", "train")
+ path_to_output_test = os.path.join(PATH_TO_MLR3TORCH, cache_dir, "hf_dataset", "ISIC_2020_Test_Input")
+
+ os.makedirs(path_to_output_train, exist_ok=True)
+ os.makedirs(path_to_output_test, exist_ok=True)
+
+ # downscale every image to 128x128
+ tx = torchvision.transforms.Resize((128, 128))
+
+ for f in tqdm(os.listdir(path_to_melanoma_train)):
+     if not f.endswith(".jpg"):  # skip stray non-image files, as in the test loop below
+         continue
+     img = torchvision.io.read_image(os.path.join(path_to_melanoma_train, f))
+     # scale to [0, 1] floats before resizing; save_image rescales on write
+     small_img = tx(img.float() / 255)
+     torchvision.utils.save_image(small_img, os.path.join(path_to_output_train, f))
+
+ for f in tqdm(os.listdir(path_to_melanoma_test)):
+     if f.endswith(".jpg"):
+         img = torchvision.io.read_image(os.path.join(path_to_melanoma_test, f))
+         small_img = tx(img.float() / 255)
+         torchvision.utils.save_image(small_img, os.path.join(path_to_output_test, f))
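A quick spot check that the resized copies really are 128x128 can be done from R before running the processing script. This is a minimal sketch, assuming the magick package is installed; it is not part of the commit.

    library(magick)
    library(here)

    # read one resized training image and verify its dimensions
    resized = list.files(here("cache", "hf_dataset", "train"), full.names = TRUE)
    info = image_info(image_read(resized[1]))
    stopifnot(info$width == 128, info$height == 128)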
03-process_melanoma.R ADDED
@@ -0,0 +1,84 @@
+ library(data.table)
+ library(tidytable)
+ library(purrr)
+ library(here)
+ library(fs)
+
+ # This script rearranges the data into the format expected by Hugging Face.
+ # It assumes you have downloaded and extracted the original data with
+ # 01-download_melanoma.R and resized the images with 02-resize_melanoma.py.
+
+ cache_dir = here("cache")
+
+ duplicates = fread(file.path(cache_dir, "ISIC_2020_Training_Duplicates.csv"))
+
+ metadata_file_paths = c(
+   file.path(cache_dir, "ISIC_2020_Training_GroundTruth.csv"),
+   file.path(cache_dir, "ISIC_2020_Training_GroundTruth_v2.csv"),
+   file.path(cache_dir, "ISIC_2020_Test_Metadata.csv")
+ )
+ metadata_dt_list = map(metadata_file_paths, fread)
+ # the test metadata names its image column "image"; align it with the training files
+ metadata_dt_list[[3]] = rename(metadata_dt_list[[3]], image_name = image)
+
+ # drop the known duplicate images from the metadata
+ dedup = function(metadata_dt, duplicate_file_names) {
+   metadata_dt[!(image_name %in% duplicate_file_names), ]
+ }
+
+ training_metadata = dedup(metadata_dt_list[[1]], duplicates$image_name_2)
+ training_metadata_v2 = dedup(metadata_dt_list[[2]], duplicates$image_name_2)
+ test_metadata = metadata_dt_list[[3]]
+
+ hf_dataset_dir = file.path(cache_dir, "hf_dataset")
+ hf_train_dir = file.path(hf_dataset_dir, "train")
+ hf_test_dir = file.path(hf_dataset_dir, "ISIC_2020_Test_Input")
+
+ # assign each image round-robin to a shard directory: train1..train4 for the
+ # training images, ISIC_2020_Test_Input1..2 for the test images
+ train_dirnames_for_each_img = paste0("train", (training_metadata_v2[, .I] %% 4) + 1)
+ test_dirnames_for_each_img = paste0("ISIC_2020_Test_Input", (test_metadata[, .I] %% 2) + 1)
+
+ # add the file_name column (image path relative to the dataset root) that Hugging Face expects
+ add_hf_file_name_col = function(metadata_dt, image_relative_dirnames) {
+   metadata_dt[, file_name := paste0(file.path(image_relative_dirnames, metadata_dt$image_name), ".jpg")]
+ }
+
+ add_hf_file_name_col(training_metadata, train_dirnames_for_each_img)
+ add_hf_file_name_col(training_metadata_v2, train_dirnames_for_each_img)
+ add_hf_file_name_col(test_metadata, test_dirnames_for_each_img)
+
+ # delete the duplicated images; the directory sizes before and after serve as a sanity check
+ list.files(hf_train_dir) |> length()
+ file.remove(file.path(hf_train_dir, paste0(duplicates$image_name_2, ".jpg")))
+ list.files(hf_train_dir) |> length()
+
+ old_names = function(metadata_dt, dir) {
+   paste0(file.path(dir, metadata_dt$image_name), ".jpg")
+ }
+
+ create_if_necessary = function(dirname) {
+   if (!dir.exists(dirname)) {
+     dir.create(dirname)
+   }
+ }
+
+ walk(file.path(hf_dataset_dir, unique(train_dirnames_for_each_img)), create_if_necessary)
+ walk(file.path(hf_dataset_dir, unique(test_dirnames_for_each_img)), create_if_necessary)
+
+ # move each image from the flat train/test folders into its shard directory
+ file_move(old_names(training_metadata_v2, hf_train_dir), file.path(hf_dataset_dir, train_dirnames_for_each_img, paste0(training_metadata_v2$image_name, ".jpg")))
+ file_move(old_names(test_metadata, hf_test_dir), file.path(hf_dataset_dir, test_dirnames_for_each_img, paste0(test_metadata$image_name, ".jpg")))
+
+ # restore the original column name before writing the test metadata
+ test_metadata = rename(test_metadata, image = image_name)
+
+ fwrite(training_metadata, file.path(hf_dataset_dir, "ISIC_2020_Training_GroundTruth.csv"))
+ fwrite(training_metadata_v2, file.path(hf_dataset_dir, "ISIC_2020_Training_GroundTruth_v2.csv"))
+ fwrite(test_metadata, file.path(hf_dataset_dir, "ISIC_2020_Test_Metadata.csv"))
+
+ # sanity checks:
+ # test1 = list.files(file.path(hf_dataset_dir, "ISIC_2020_Test_Input1"))
+ # test2 = list.files(file.path(hf_dataset_dir, "ISIC_2020_Test_Input2"))
+ # setdiff(test1, test2)
+ # test_metadata |> filter(image_name == "ISIC_9999302") |> pull(file_name)
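In the spirit of the commented-out checks above, a final cross-check (hypothetical, not part of the commit) is to verify that every path in the file_name columns resolves against the dataset root after the moves:

    # every image referenced by the metadata should now exist in its shard directory
    stopifnot(all(file.exists(file.path(hf_dataset_dir, training_metadata_v2$file_name))))
    stopifnot(all(file.exists(file.path(hf_dataset_dir, test_metadata$file_name))))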