hosseinbv committed on
Commit dab2297 · verified · 1 Parent(s): 3b44b84

Uploading /ephemeral/hossein/output/sam2

Files changed (50)
  1. checkpoints/checkpoint.pt +3 -0
  2. checkpoints/checkpoint_1.pt +3 -0
  3. checkpoints/checkpoint_10.pt +3 -0
  4. checkpoints/checkpoint_11.pt +3 -0
  5. checkpoints/checkpoint_12.pt +3 -0
  6. checkpoints/checkpoint_13.pt +3 -0
  7. checkpoints/checkpoint_14.pt +3 -0
  8. checkpoints/checkpoint_15.pt +3 -0
  9. checkpoints/checkpoint_16.pt +3 -0
  10. checkpoints/checkpoint_17.pt +3 -0
  11. checkpoints/checkpoint_18.pt +3 -0
  12. checkpoints/checkpoint_19.pt +3 -0
  13. checkpoints/checkpoint_2.pt +3 -0
  14. checkpoints/checkpoint_20.pt +3 -0
  15. checkpoints/checkpoint_21.pt +3 -0
  16. checkpoints/checkpoint_22.pt +3 -0
  17. checkpoints/checkpoint_23.pt +3 -0
  18. checkpoints/checkpoint_24.pt +3 -0
  19. checkpoints/checkpoint_25.pt +3 -0
  20. checkpoints/checkpoint_26.pt +3 -0
  21. checkpoints/checkpoint_27.pt +3 -0
  22. checkpoints/checkpoint_28.pt +3 -0
  23. checkpoints/checkpoint_29.pt +3 -0
  24. checkpoints/checkpoint_3.pt +3 -0
  25. checkpoints/checkpoint_30.pt +3 -0
  26. checkpoints/checkpoint_31.pt +3 -0
  27. checkpoints/checkpoint_32.pt +3 -0
  28. checkpoints/checkpoint_33.pt +3 -0
  29. checkpoints/checkpoint_34.pt +3 -0
  30. checkpoints/checkpoint_35.pt +3 -0
  31. checkpoints/checkpoint_36.pt +3 -0
  32. checkpoints/checkpoint_37.pt +3 -0
  33. checkpoints/checkpoint_38.pt +3 -0
  34. checkpoints/checkpoint_39.pt +3 -0
  35. checkpoints/checkpoint_4.pt +3 -0
  36. checkpoints/checkpoint_40.pt +3 -0
  37. checkpoints/checkpoint_5.pt +3 -0
  38. checkpoints/checkpoint_6.pt +3 -0
  39. checkpoints/checkpoint_7.pt +3 -0
  40. checkpoints/checkpoint_8.pt +3 -0
  41. checkpoints/checkpoint_9.pt +3 -0
  42. config.yaml +330 -0
  43. config_resolved.yaml +368 -0
  44. logs/best_stats.json +40 -0
  45. logs/log.txt +0 -0
  46. logs/train_stats.json +40 -0
  47. tensorboard/events.out.tfevents.1736037893.creative-turing-2.3136573.0928bfd69-289f-40fa-b70d-977964f79bac +3 -0
  48. tensorboard/events.out.tfevents.1736039984.creative-turing-2.3143541.09b0a183e-0877-483b-8228-cdb52183d13b +3 -0
  49. tensorboard/events.out.tfevents.1736040155.creative-turing-2.3145589.0dbed95eb-8481-4404-acdf-20879f1218f7 +3 -0
  50. tensorboard/events.out.tfevents.1737390155.creative-turing-2.1898139.03b45d984-ad8b-4ade-ab73-ec2387852ef3 +3 -0
checkpoints/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28d6d9c415897594ac42cfec1f4dd86a3f4234ab6fb12a3e8be5aae22c74ca17
+ size 2694489716
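Each checkpoint entry in this diff is a Git LFS pointer file: the repository records only the `version`/`oid`/`size` triplet, while the roughly 2.7 GB weight payload lives in LFS storage. As a minimal sketch (assuming a checkout where only the pointer text, not the payload, is present; the path is just the repo-relative one shown above), the three fields can be read like this:

```python
# Sketch: parse a Git LFS pointer file (three "key value" lines as shown above).
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

ptr = read_lfs_pointer("checkpoints/checkpoint.pt")
print(ptr["oid"], int(ptr["size"]))  # e.g. sha256:28d6... 2694489716
```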
checkpoints/checkpoint_1.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7efb95010b7b8df029e30bd4470c2320d7fbcbc4409a0df2e3d70d2a71baf01
+ size 2694489652
checkpoints/checkpoint_10.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d10f21cd41fc95979bb572edc8a07a970f3a7eb57e86bbc7f05934949a9575f
+ size 2694489716
checkpoints/checkpoint_11.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d0388b72489d8faf86ce98f4b1fdcf5a2acebc5161a688d36d8fe06246c1304
+ size 2694489716
checkpoints/checkpoint_12.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec1a82c2a9ce3df09270fa431bd2d7e97c7de72342f7f0f8ee3e31ae94f4255a
+ size 2694489716
checkpoints/checkpoint_13.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855edc9b7be5573edca512832f46b11a145796b13486dd970cf216e2f229c148
+ size 2694489716
checkpoints/checkpoint_14.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb7a4110c496b971787ed492a70d886cfe3a901c5e9df2db0df67d57d3ba01e9
+ size 2694489716
checkpoints/checkpoint_15.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:498dcb06dab133f2e68c23e07864ad250a031b44e04c1e14cccc078040bc096e
+ size 2694489716
checkpoints/checkpoint_16.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97723be337ae8e50fc439ce09156c8d5961e05968112ab0fe30544ffefed9a3e
+ size 2694489716
checkpoints/checkpoint_17.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5366b10e7a9221c32dd062b613ae16fb78f3b032d50f597714a69b6cb8ea6b32
+ size 2694489716
checkpoints/checkpoint_18.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f7c95cd31b9b164bd2d22ee0a1c5a4d095fc1690a87c7aca43db062cad092e1
+ size 2694489716
checkpoints/checkpoint_19.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cfb110f08e14c314d4cd28b60124231c47753c0e38f5edf3df6df8439f3bc7e
+ size 2694489716
checkpoints/checkpoint_2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c41a3181fcd70e746e7fd3632c43c2fc49249cee5527b11430c42c22ecc3767
+ size 2694489716
checkpoints/checkpoint_20.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49a660a992b5fa6dc4f2fce4d58bd4fc62749b805ca96bb34280b1b03c686937
+ size 2694489716
checkpoints/checkpoint_21.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a1a2ff30542e32c312e9d57f10677dd1fe686f9831ead0dfea0a00dfd6a9e5c
+ size 2694489716
checkpoints/checkpoint_22.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba185f5715939b94dc0552225607f53695c3be820e5cb007ad6586c9f115d819
+ size 2694489716
checkpoints/checkpoint_23.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ce76f631ba61e12b058909109d29e9bc82f6e5ff5db087e292bfd9a602e437f
+ size 2694489716
checkpoints/checkpoint_24.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b70f7c46b231d02273b6b3b2d90c3118f3de36ccc79c4fb308572036ea36a77b
+ size 2694489716
checkpoints/checkpoint_25.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1dac06e8e0ba6b2d1d55a41e21f8a87b02fa508c6e9091efe8fe7ccfec5c8b26
+ size 2694489716
checkpoints/checkpoint_26.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39efef224a0f82c41713408137269a854a4442b6b30d772ccc828dd089cebbc2
+ size 2694489716
checkpoints/checkpoint_27.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:364430c6ffe3884d25ae7212bbba36744df2c42657777e80cb7b949eb6e93d7f
+ size 2694489716
checkpoints/checkpoint_28.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72820f398662c3958d24bad2b4c1040387b8320a1ee92096375608b6f7491fa5
+ size 2694489716
checkpoints/checkpoint_29.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7e0f8158e783e4578e6fd52e77bd0dd70e4a961c9996ffe2f58b3f5b43f2661
+ size 2694489716
checkpoints/checkpoint_3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e0ec56c282d2473e4293064a29385bab59bd80961b7203d90d59229eca6cc5
+ size 2694489716
checkpoints/checkpoint_30.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf0962c6647baca0cf5b042fe59b983822ecc1e9e2fce809656efba9736a98f0
+ size 2694489716
checkpoints/checkpoint_31.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35a08c55b1cab0356449d47391aaede4c4b703f05beff8ff09027c1cdf7d9413
+ size 2694489716
checkpoints/checkpoint_32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f6f7f1b70a8b5bf0d8cbd800c068e15d31ebd9b7dec45f86f8764dfa527c986
+ size 2694489716
checkpoints/checkpoint_33.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e8cb5a6fb2505cd28e16f862f9e877fbe8aedddc9d268fc2d2abfeb1ad92384
+ size 2694489716
checkpoints/checkpoint_34.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d86977b27452eaf2eae3a22e3397cd520bbe998b6a39eca60142c18546d47fc4
+ size 2694489716
checkpoints/checkpoint_35.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2db63d692de757cf35175cf8d6ebc2a1c56ca7847c3260588c3b8ed2cb80a481
+ size 2694489716
checkpoints/checkpoint_36.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5207473bfeeac893775c285850969160962cdfdc1df7f02fee8a22dfb9053e9d
+ size 2694489716
checkpoints/checkpoint_37.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f5b38a2ab33e767efb0935531af81cda463ef8be722377c78b021a64e451261
+ size 2694489716
checkpoints/checkpoint_38.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44337fc480c3d62d87ff34cb386d1b0b1a7e7e9b9fa9abd9196a4f8bef29df50
+ size 2694489716
checkpoints/checkpoint_39.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98e3f8e676e40191242d4c2654a63fd54cb4a7eff27f6e0f8e50c6c1f8b509ab
+ size 2694489716
checkpoints/checkpoint_4.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31196add528f7fec7e87c248ba15ab30b1958f50d69a00c578a0ea9f06926831
+ size 2694489716
checkpoints/checkpoint_40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28d6d9c415897594ac42cfec1f4dd86a3f4234ab6fb12a3e8be5aae22c74ca17
+ size 2694489716
checkpoints/checkpoint_5.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f75da0d3cd0b3e4f0468df318ffee3325d5b3ecec59b31e87c856188f2568d53
+ size 2694489716
checkpoints/checkpoint_6.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f206b108ca9d77d5ac1c984d31291ba7d8c0b2658c99a5c7de22c792b8dcd92
+ size 2694489716
checkpoints/checkpoint_7.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b806b82fab36d0cb6a68ac88be669b6aa77a4f461a5b9aebdadb1912feb05d79
+ size 2694489716
checkpoints/checkpoint_8.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:060a8ab901ae15da1b96ad4eb7141e82c58b8db71382eceaf4606399258de98f
+ size 2694489716
checkpoints/checkpoint_9.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87577e1a69550af503bd9089c6635cb1cbe497b677f4d703cc74eafe49cdaf6d
+ size 2694489716
config.yaml ADDED
@@ -0,0 +1,330 @@
+ scratch:
+ resolution: 1024
+ train_batch_size: 1
+ num_train_workers: 3
+ num_frames: 8
+ max_num_objects: 4
+ base_lr: 5.0e-06
+ vision_lr: 3.0e-06
+ phases_per_epoch: 1
+ num_epochs: 40
+ dataset:
+ img_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/JPEGImages
+ gt_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/Annotations
+ file_list_txt: null
+ multiplier: 2
+ vos:
+ train_transforms:
+ - _target_: training.dataset.transforms.ComposeAPI
+ transforms:
+ - _target_: training.dataset.transforms.RandomHorizontalFlip
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomAffine
+ degrees: 25
+ shear: 20
+ image_interpolation: bilinear
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomResizeAPI
+ sizes: ${scratch.resolution}
+ square: true
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: true
+ brightness: 0.1
+ contrast: 0.03
+ saturation: 0.03
+ hue: null
+ - _target_: training.dataset.transforms.RandomGrayscale
+ p: 0.05
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: false
+ brightness: 0.1
+ contrast: 0.05
+ saturation: 0.05
+ hue: null
+ - _target_: training.dataset.transforms.ToTensorAPI
+ - _target_: training.dataset.transforms.NormalizeAPI
+ mean:
+ - 0.485
+ - 0.456
+ - 0.406
+ std:
+ - 0.229
+ - 0.224
+ - 0.225
+ trainer:
+ _target_: training.trainer.Trainer
+ mode: train_only
+ max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
+ accelerator: cuda
+ seed_value: 123
+ model:
+ _target_: training.model.sam2.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages:
+ - 2
+ - 6
+ - 36
+ - 4
+ global_att_blocks:
+ - 23
+ - 33
+ - 43
+ window_pos_embed_bkg_spatial_size:
+ - 7
+ - 7
+ window_spec:
+ - 8
+ - 4
+ - 16
+ - 8
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list:
+ - 1152
+ - 576
+ - 288
+ - 144
+ fpn_top_down_levels:
+ - 2
+ - 3
+ fpn_interp_model: nearest
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes:
+ - 64
+ - 64
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes:
+ - 64
+ - 64
+ rope_k_repeat: true
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1.0e-06
+ use_dwconv: true
+ num_layers: 2
+ num_maskmem: 7
+ image_size: ${scratch.resolution}
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ use_high_res_features_in_sam: true
+ multimask_output_in_sam: true
+ iou_prediction_use_sigmoid: true
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ compile_image_encoder: false
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1
+ num_frames_to_correct_for_train: 2
+ num_frames_to_correct_for_eval: 1
+ rand_frames_to_correct_for_train: true
+ add_all_frames_to_correct_as_cond: true
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+ num_init_cond_frames_for_eval: 1
+ forward_backbone_per_frame_for_eval: true
+ data:
+ train:
+ _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
+ phases_per_epoch: ${scratch.phases_per_epoch}
+ batch_sizes:
+ - ${scratch.train_batch_size}
+ datasets:
+ - _target_: training.dataset.utils.RepeatFactorWrapper
+ dataset:
+ _target_: training.dataset.utils.ConcatDataset
+ datasets:
+ - _target_: training.dataset.vos_dataset.VOSDataset
+ transforms: ${vos.train_transforms}
+ training: true
+ video_dataset:
+ _target_: training.dataset.vos_raw_dataset.PNGRawDataset
+ img_folder: ${dataset.img_folder}
+ gt_folder: ${dataset.gt_folder}
+ file_list_txt: ${dataset.file_list_txt}
+ sampler:
+ _target_: training.dataset.vos_sampler.RandomUniformSampler
+ num_frames: ${scratch.num_frames}
+ max_num_objects: ${scratch.max_num_objects}
+ multiplier: ${dataset.multiplier}
+ shuffle: true
+ num_workers: ${scratch.num_train_workers}
+ pin_memory: true
+ drop_last: true
+ collate_fn:
+ _target_: training.utils.data_utils.collate_fn
+ _partial_: true
+ dict_key: all
+ optim:
+ amp:
+ enabled: true
+ amp_dtype: bfloat16
+ optimizer:
+ _target_: torch.optim.AdamW
+ gradient_clip:
+ _target_: training.optimizer.GradientClipper
+ max_norm: 0.1
+ norm_type: 2
+ param_group_modifiers:
+ - _target_: training.optimizer.layer_decay_param_modifier
+ _partial_: true
+ layer_decay_value: 0.9
+ apply_to: image_encoder.trunk
+ overrides:
+ - pattern: '*pos_embed*'
+ value: 1.0
+ options:
+ lr:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.base_lr}
+ end_value: ${divide:${scratch.base_lr},10}
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: ${scratch.vision_lr}
+ end_value: ${divide:${scratch.vision_lr},10}
+ param_names:
+ - image_encoder.*
+ weight_decay:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.1
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.0
+ param_names:
+ - '*bias*'
+ module_cls_names:
+ - torch.nn.LayerNorm
+ loss:
+ all:
+ _target_: training.loss_fns.MultiStepMultiMasksAndIous
+ weight_dict:
+ loss_mask: 20
+ loss_dice: 1
+ loss_iou: 1
+ loss_class: 1
+ supervise_all_iou: true
+ iou_use_l1_loss: true
+ pred_obj_scores: true
+ focal_gamma_obj_score: 0.0
+ focal_alpha_obj_score: -1.0
+ distributed:
+ backend: nccl
+ find_unused_parameters: true
+ logging:
+ tensorboard_writer:
+ _target_: training.utils.logger.make_tensorboard_logger
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
+ flush_secs: 120
+ should_log: true
+ log_dir: ${launcher.experiment_log_dir}/logs
+ log_freq: 10
+ checkpoint:
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
+ save_freq: 1
+ model_weight_initializer:
+ _partial_: true
+ _target_: training.utils.checkpoint_utils.load_state_dict_into_model
+ strict: true
+ ignore_unexpected_keys: null
+ ignore_missing_keys: null
+ state_dict:
+ _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
+ checkpoint_path: /home/hossein/hossein/projects/sam2/checkpoints/sam2.1_hiera_large.pt
+ ckpt_state_dict_keys:
+ - model
+ launcher:
+ num_nodes: 1
+ gpus_per_node: 4
+ experiment_log_dir: /ephemeral/hossein/output/sam2
+ submitit:
+ partition: null
+ account: null
+ qos: null
+ cpus_per_task: 10
+ use_cluster: false
+ timeout_hour: 24
+ name: null
+ port_range:
+ - 10000
+ - 65000
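The config above is an unresolved Hydra/OmegaConf-style file: it relies on `${scratch.*}` interpolations plus two custom arithmetic resolvers, `times` and `divide`. A minimal sketch of how those expressions collapse into the literals recorded in `config_resolved.yaml` below, assuming the original nested YAML file rather than this flattened diff view (the resolver names come from the config itself; the registration code here is illustrative, not the project's own):

```python
# Sketch: resolve ${times:...} / ${divide:...} the way config_resolved.yaml shows.
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("times", lambda a, b: a * b)
OmegaConf.register_new_resolver("divide", lambda a, b: a / b)

cfg = OmegaConf.load("config.yaml")  # hypothetical local copy of the nested file
print(cfg.trainer.max_epochs)        # 40 * 1 -> 40
print(cfg.optim.options.lr[0].scheduler.end_value)  # 5.0e-06 / 10 -> ~5e-07
# The tiny float error (5.000000000000001e-07) is exactly what config_resolved.yaml records.
```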
config_resolved.yaml ADDED
@@ -0,0 +1,368 @@
+ scratch:
+ resolution: 1024
+ train_batch_size: 1
+ num_train_workers: 3
+ num_frames: 8
+ max_num_objects: 4
+ base_lr: 5.0e-06
+ vision_lr: 3.0e-06
+ phases_per_epoch: 1
+ num_epochs: 40
+ dataset:
+ img_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/JPEGImages
+ gt_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/Annotations
+ file_list_txt: null
+ multiplier: 2
+ vos:
+ train_transforms:
+ - _target_: training.dataset.transforms.ComposeAPI
+ transforms:
+ - _target_: training.dataset.transforms.RandomHorizontalFlip
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomAffine
+ degrees: 25
+ shear: 20
+ image_interpolation: bilinear
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomResizeAPI
+ sizes: 1024
+ square: true
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: true
+ brightness: 0.1
+ contrast: 0.03
+ saturation: 0.03
+ hue: null
+ - _target_: training.dataset.transforms.RandomGrayscale
+ p: 0.05
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: false
+ brightness: 0.1
+ contrast: 0.05
+ saturation: 0.05
+ hue: null
+ - _target_: training.dataset.transforms.ToTensorAPI
+ - _target_: training.dataset.transforms.NormalizeAPI
+ mean:
+ - 0.485
+ - 0.456
+ - 0.406
+ std:
+ - 0.229
+ - 0.224
+ - 0.225
+ trainer:
+ _target_: training.trainer.Trainer
+ mode: train_only
+ max_epochs: 40
+ accelerator: cuda
+ seed_value: 123
+ model:
+ _target_: training.model.sam2.SAM2Train
+ image_encoder:
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
+ scalp: 1
+ trunk:
+ _target_: sam2.modeling.backbones.hieradet.Hiera
+ embed_dim: 144
+ num_heads: 2
+ stages:
+ - 2
+ - 6
+ - 36
+ - 4
+ global_att_blocks:
+ - 23
+ - 33
+ - 43
+ window_pos_embed_bkg_spatial_size:
+ - 7
+ - 7
+ window_spec:
+ - 8
+ - 4
+ - 16
+ - 8
+ neck:
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 256
+ normalize: true
+ scale: null
+ temperature: 10000
+ d_model: 256
+ backbone_channel_list:
+ - 1152
+ - 576
+ - 288
+ - 144
+ fpn_top_down_levels:
+ - 2
+ - 3
+ fpn_interp_model: nearest
+ memory_attention:
+ _target_: sam2.modeling.memory_attention.MemoryAttention
+ d_model: 256
+ pos_enc_at_input: true
+ layer:
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
+ activation: relu
+ dim_feedforward: 2048
+ dropout: 0.1
+ pos_enc_at_attn: false
+ self_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes:
+ - 64
+ - 64
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ d_model: 256
+ pos_enc_at_cross_attn_keys: true
+ pos_enc_at_cross_attn_queries: false
+ cross_attention:
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
+ rope_theta: 10000.0
+ feat_sizes:
+ - 64
+ - 64
+ rope_k_repeat: true
+ embedding_dim: 256
+ num_heads: 1
+ downsample_rate: 1
+ dropout: 0.1
+ kv_in_dim: 64
+ num_layers: 4
+ memory_encoder:
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
+ out_dim: 64
+ position_encoding:
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
+ num_pos_feats: 64
+ normalize: true
+ scale: null
+ temperature: 10000
+ mask_downsampler:
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
+ kernel_size: 3
+ stride: 2
+ padding: 1
+ fuser:
+ _target_: sam2.modeling.memory_encoder.Fuser
+ layer:
+ _target_: sam2.modeling.memory_encoder.CXBlock
+ dim: 256
+ kernel_size: 7
+ padding: 3
+ layer_scale_init_value: 1.0e-06
+ use_dwconv: true
+ num_layers: 2
+ num_maskmem: 7
+ image_size: 1024
+ sigmoid_scale_for_mem_enc: 20.0
+ sigmoid_bias_for_mem_enc: -10.0
+ use_mask_input_as_output_without_sam: true
+ directly_add_no_mem_embed: true
+ no_obj_embed_spatial: true
+ use_high_res_features_in_sam: true
+ multimask_output_in_sam: true
+ iou_prediction_use_sigmoid: true
+ use_obj_ptrs_in_encoder: true
+ add_tpos_enc_to_obj_ptrs: true
+ proj_tpos_enc_in_obj_ptrs: true
+ use_signed_tpos_enc_to_obj_ptrs: true
+ only_obj_ptrs_in_the_past_for_eval: true
+ pred_obj_scores: true
+ pred_obj_scores_mlp: true
+ fixed_no_obj_ptr: true
+ multimask_output_for_tracking: true
+ use_multimask_token_for_obj_ptr: true
+ multimask_min_pt_num: 0
+ multimask_max_pt_num: 1
+ use_mlp_for_obj_ptr_proj: true
+ compile_image_encoder: false
+ prob_to_use_pt_input_for_train: 0.5
+ prob_to_use_pt_input_for_eval: 0.0
+ prob_to_use_box_input_for_train: 0.5
+ prob_to_use_box_input_for_eval: 0.0
+ prob_to_sample_from_gt_for_train: 0.1
+ num_frames_to_correct_for_train: 2
+ num_frames_to_correct_for_eval: 1
+ rand_frames_to_correct_for_train: true
+ add_all_frames_to_correct_as_cond: true
+ num_init_cond_frames_for_train: 2
+ rand_init_cond_frames_for_train: true
+ num_correction_pt_per_frame: 7
+ use_act_ckpt_iterative_pt_sampling: false
+ num_init_cond_frames_for_eval: 1
+ forward_backbone_per_frame_for_eval: true
+ data:
+ train:
+ _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
+ phases_per_epoch: 1
+ batch_sizes:
+ - 1
+ datasets:
+ - _target_: training.dataset.utils.RepeatFactorWrapper
+ dataset:
+ _target_: training.dataset.utils.ConcatDataset
+ datasets:
+ - _target_: training.dataset.vos_dataset.VOSDataset
+ transforms:
+ - _target_: training.dataset.transforms.ComposeAPI
+ transforms:
+ - _target_: training.dataset.transforms.RandomHorizontalFlip
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomAffine
+ degrees: 25
+ shear: 20
+ image_interpolation: bilinear
+ consistent_transform: true
+ - _target_: training.dataset.transforms.RandomResizeAPI
+ sizes: 1024
+ square: true
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: true
+ brightness: 0.1
+ contrast: 0.03
+ saturation: 0.03
+ hue: null
+ - _target_: training.dataset.transforms.RandomGrayscale
+ p: 0.05
+ consistent_transform: true
+ - _target_: training.dataset.transforms.ColorJitter
+ consistent_transform: false
+ brightness: 0.1
+ contrast: 0.05
+ saturation: 0.05
+ hue: null
+ - _target_: training.dataset.transforms.ToTensorAPI
+ - _target_: training.dataset.transforms.NormalizeAPI
+ mean:
+ - 0.485
+ - 0.456
+ - 0.406
+ std:
+ - 0.229
+ - 0.224
+ - 0.225
+ training: true
+ video_dataset:
+ _target_: training.dataset.vos_raw_dataset.PNGRawDataset
+ img_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/JPEGImages
+ gt_folder: /home/hossein/hossein/projects/sam2/training/ptmc-data/Annotations
+ file_list_txt: null
+ sampler:
+ _target_: training.dataset.vos_sampler.RandomUniformSampler
+ num_frames: 8
+ max_num_objects: 4
+ multiplier: 2
+ shuffle: true
+ num_workers: 3
+ pin_memory: true
+ drop_last: true
+ collate_fn:
+ _target_: training.utils.data_utils.collate_fn
+ _partial_: true
+ dict_key: all
+ optim:
+ amp:
+ enabled: true
+ amp_dtype: bfloat16
+ optimizer:
+ _target_: torch.optim.AdamW
+ gradient_clip:
+ _target_: training.optimizer.GradientClipper
+ max_norm: 0.1
+ norm_type: 2
+ param_group_modifiers:
+ - _target_: training.optimizer.layer_decay_param_modifier
+ _partial_: true
+ layer_decay_value: 0.9
+ apply_to: image_encoder.trunk
+ overrides:
+ - pattern: '*pos_embed*'
+ value: 1.0
+ options:
+ lr:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: 5.0e-06
+ end_value: 5.000000000000001e-07
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.CosineParamScheduler
+ start_value: 3.0e-06
+ end_value: 3.0e-07
+ param_names:
+ - image_encoder.*
+ weight_decay:
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.1
+ - scheduler:
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
+ value: 0.0
+ param_names:
+ - '*bias*'
+ module_cls_names:
+ - torch.nn.LayerNorm
+ loss:
+ all:
+ _target_: training.loss_fns.MultiStepMultiMasksAndIous
+ weight_dict:
+ loss_mask: 20
+ loss_dice: 1
+ loss_iou: 1
+ loss_class: 1
+ supervise_all_iou: true
+ iou_use_l1_loss: true
+ pred_obj_scores: true
+ focal_gamma_obj_score: 0.0
+ focal_alpha_obj_score: -1.0
+ distributed:
+ backend: nccl
+ find_unused_parameters: true
+ logging:
+ tensorboard_writer:
+ _target_: training.utils.logger.make_tensorboard_logger
+ log_dir: /ephemeral/hossein/output/sam2/tensorboard
+ flush_secs: 120
+ should_log: true
+ log_dir: /ephemeral/hossein/output/sam2/logs
+ log_freq: 10
+ checkpoint:
+ save_dir: /ephemeral/hossein/output/sam2/checkpoints
+ save_freq: 1
+ model_weight_initializer:
+ _partial_: true
+ _target_: training.utils.checkpoint_utils.load_state_dict_into_model
+ strict: true
+ ignore_unexpected_keys: null
+ ignore_missing_keys: null
+ state_dict:
+ _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
+ checkpoint_path: /home/hossein/hossein/projects/sam2/checkpoints/sam2.1_hiera_large.pt
+ ckpt_state_dict_keys:
+ - model
+ launcher:
+ num_nodes: 1
+ gpus_per_node: 4
+ experiment_log_dir: /ephemeral/hossein/output/sam2
+ submitit:
+ partition: null
+ account: null
+ qos: null
+ cpus_per_task: 10
+ use_cluster: false
+ timeout_hour: 24
+ name: null
+ port_range:
+ - 10000
+ - 65000
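For completeness, a small hedged sketch of inspecting one of the uploaded checkpoints once its LFS payload has been fetched. The config's `model_weight_initializer` pulls a `model` entry out of the pretrained SAM 2.1 checkpoint (`ckpt_state_dict_keys: [model]`); the trainer checkpoints saved here may follow a similar layout, but the snippet below does not assume it and only prints what is actually present:

```python
# Sketch: peek at a fetched checkpoint's top-level structure (path as in this repo).
import torch

# weights_only=False does full unpickling, so only use it on checkpoints you trust.
ckpt = torch.load("checkpoints/checkpoint_40.pt", map_location="cpu", weights_only=False)
if isinstance(ckpt, dict):
    print(list(ckpt.keys()))  # "model" is expected if it mirrors the SAM 2.1 layout
    if "model" in ckpt:
        print(len(ckpt["model"]), "entries in the model state dict")
else:
    print(type(ckpt))
```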
logs/best_stats.json ADDED
@@ -0,0 +1,40 @@
+ {"Trainer/where": 0.024285714285714285, "Trainer/epoch": 0, "Trainer/steps_train": 35}
+ {"Trainer/where": 0.04928571428571429, "Trainer/epoch": 1, "Trainer/steps_train": 70}
+ {"Trainer/where": 0.07428571428571429, "Trainer/epoch": 2, "Trainer/steps_train": 105}
+ {"Trainer/where": 0.09928571428571428, "Trainer/epoch": 3, "Trainer/steps_train": 140}
+ {"Trainer/where": 0.12428571428571429, "Trainer/epoch": 4, "Trainer/steps_train": 175}
+ {"Trainer/where": 0.1492857142857143, "Trainer/epoch": 5, "Trainer/steps_train": 210}
+ {"Trainer/where": 0.1742857142857143, "Trainer/epoch": 6, "Trainer/steps_train": 245}
+ {"Trainer/where": 0.1992857142857143, "Trainer/epoch": 7, "Trainer/steps_train": 280}
+ {"Trainer/where": 0.22428571428571428, "Trainer/epoch": 8, "Trainer/steps_train": 315}
+ {"Trainer/where": 0.24928571428571428, "Trainer/epoch": 9, "Trainer/steps_train": 350}
+ {"Trainer/where": 0.2742857142857143, "Trainer/epoch": 10, "Trainer/steps_train": 385}
+ {"Trainer/where": 0.29928571428571427, "Trainer/epoch": 11, "Trainer/steps_train": 420}
+ {"Trainer/where": 0.3242857142857143, "Trainer/epoch": 12, "Trainer/steps_train": 455}
+ {"Trainer/where": 0.3492857142857143, "Trainer/epoch": 13, "Trainer/steps_train": 490}
+ {"Trainer/where": 0.3742857142857143, "Trainer/epoch": 14, "Trainer/steps_train": 525}
+ {"Trainer/where": 0.3992857142857143, "Trainer/epoch": 15, "Trainer/steps_train": 560}
+ {"Trainer/where": 0.42428571428571427, "Trainer/epoch": 16, "Trainer/steps_train": 595}
+ {"Trainer/where": 0.4492857142857143, "Trainer/epoch": 17, "Trainer/steps_train": 630}
+ {"Trainer/where": 0.4742857142857143, "Trainer/epoch": 18, "Trainer/steps_train": 665}
+ {"Trainer/where": 0.4992857142857143, "Trainer/epoch": 19, "Trainer/steps_train": 700}
+ {"Trainer/where": 0.5242857142857142, "Trainer/epoch": 20, "Trainer/steps_train": 735}
+ {"Trainer/where": 0.5492857142857143, "Trainer/epoch": 21, "Trainer/steps_train": 770}
+ {"Trainer/where": 0.5742857142857143, "Trainer/epoch": 22, "Trainer/steps_train": 805}
+ {"Trainer/where": 0.5992857142857143, "Trainer/epoch": 23, "Trainer/steps_train": 840}
+ {"Trainer/where": 0.6242857142857143, "Trainer/epoch": 24, "Trainer/steps_train": 875}
+ {"Trainer/where": 0.6492857142857142, "Trainer/epoch": 25, "Trainer/steps_train": 910}
+ {"Trainer/where": 0.6742857142857143, "Trainer/epoch": 26, "Trainer/steps_train": 945}
+ {"Trainer/where": 0.6992857142857143, "Trainer/epoch": 27, "Trainer/steps_train": 980}
+ {"Trainer/where": 0.7242857142857143, "Trainer/epoch": 28, "Trainer/steps_train": 1015}
+ {"Trainer/where": 0.7492857142857143, "Trainer/epoch": 29, "Trainer/steps_train": 1050}
+ {"Trainer/where": 0.7742857142857142, "Trainer/epoch": 30, "Trainer/steps_train": 1085}
+ {"Trainer/where": 0.7992857142857143, "Trainer/epoch": 31, "Trainer/steps_train": 1120}
+ {"Trainer/where": 0.8242857142857142, "Trainer/epoch": 32, "Trainer/steps_train": 1155}
+ {"Trainer/where": 0.8492857142857142, "Trainer/epoch": 33, "Trainer/steps_train": 1190}
+ {"Trainer/where": 0.8742857142857142, "Trainer/epoch": 34, "Trainer/steps_train": 1225}
+ {"Trainer/where": 0.8992857142857142, "Trainer/epoch": 35, "Trainer/steps_train": 1260}
+ {"Trainer/where": 0.9242857142857142, "Trainer/epoch": 36, "Trainer/steps_train": 1295}
+ {"Trainer/where": 0.9492857142857142, "Trainer/epoch": 37, "Trainer/steps_train": 1330}
+ {"Trainer/where": 0.9742857142857142, "Trainer/epoch": 38, "Trainer/steps_train": 1365}
+ {"Trainer/where": 0.9992857142857142, "Trainer/epoch": 39, "Trainer/steps_train": 1400}
logs/log.txt ADDED
The diff for this file is too large to render.
logs/train_stats.json ADDED
@@ -0,0 +1,40 @@
+ {"Losses/train_all_loss": 15.52312662942069, "Losses/train_all_loss_mask": 0.03629592404467985, "Losses/train_all_loss_dice": 10.42937673841204, "Losses/train_all_loss_iou": 1.4414258205597954, "Losses/train_all_loss_class": 2.9264054538948194, "Losses/train_all_core_loss": 15.52312662942069, "Trainer/where": 0.024285714285714285, "Trainer/epoch": 0, "Trainer/steps_train": 35}
+ {"Losses/train_all_loss": 13.35472868510655, "Losses/train_all_loss_mask": 0.021235573248954358, "Losses/train_all_loss_dice": 10.641365950448172, "Losses/train_all_loss_iou": 0.7040971059019544, "Losses/train_all_loss_class": 1.5845541834831238, "Losses/train_all_core_loss": 13.35472868510655, "Trainer/where": 0.04928571428571429, "Trainer/epoch": 1, "Trainer/steps_train": 70}
+ {"Losses/train_all_loss": 13.056742286682129, "Losses/train_all_loss_mask": 0.011416948284854048, "Losses/train_all_loss_dice": 10.823509979248048, "Losses/train_all_loss_iou": 0.5041234560734925, "Losses/train_all_loss_class": 1.500769633267607, "Losses/train_all_core_loss": 13.056742286682129, "Trainer/where": 0.07428571428571429, "Trainer/epoch": 2, "Trainer/steps_train": 105}
+ {"Losses/train_all_loss": 13.252275030953543, "Losses/train_all_loss_mask": 0.0038689142230266173, "Losses/train_all_loss_dice": 10.98888555254255, "Losses/train_all_loss_iou": 0.22835017135366797, "Losses/train_all_loss_class": 1.9576609894633292, "Losses/train_all_core_loss": 13.252275030953543, "Trainer/where": 0.09928571428571428, "Trainer/epoch": 3, "Trainer/steps_train": 140}
+ {"Losses/train_all_loss": 13.165853296007429, "Losses/train_all_loss_mask": 0.003397888245152509, "Losses/train_all_loss_dice": 10.91664868763515, "Losses/train_all_loss_iou": 0.17255155175392117, "Losses/train_all_loss_class": 2.00869502723217, "Losses/train_all_core_loss": 13.165853296007429, "Trainer/where": 0.12428571428571429, "Trainer/epoch": 4, "Trainer/steps_train": 175}
+ {"Losses/train_all_loss": 12.23187631879534, "Losses/train_all_loss_mask": 0.003290402367877375, "Losses/train_all_loss_dice": 10.583557428632464, "Losses/train_all_loss_iou": 0.1472343360522895, "Losses/train_all_loss_class": 1.4352764577205692, "Losses/train_all_core_loss": 12.23187631879534, "Trainer/where": 0.1492857142857143, "Trainer/epoch": 5, "Trainer/steps_train": 210}
+ {"Losses/train_all_loss": 12.532559653690884, "Losses/train_all_loss_mask": 0.00453987946135125, "Losses/train_all_loss_dice": 10.932144478389196, "Losses/train_all_loss_iou": 0.11682707172585652, "Losses/train_all_loss_class": 1.3927903246666704, "Losses/train_all_core_loss": 12.532559653690884, "Trainer/where": 0.1742857142857143, "Trainer/epoch": 6, "Trainer/steps_train": 245}
+ {"Losses/train_all_loss": 12.028561905452184, "Losses/train_all_loss_mask": 0.003908876593257966, "Losses/train_all_loss_dice": 9.698760529926846, "Losses/train_all_loss_iou": 0.10878030185870427, "Losses/train_all_loss_class": 2.1428432097658514, "Losses/train_all_core_loss": 12.028561905452184, "Trainer/where": 0.1992857142857143, "Trainer/epoch": 7, "Trainer/steps_train": 280}
+ {"Losses/train_all_loss": 12.847667748587472, "Losses/train_all_loss_mask": 0.002982073316317318, "Losses/train_all_loss_dice": 11.120562178747994, "Losses/train_all_loss_iou": 0.07551417656109802, "Losses/train_all_loss_class": 1.5919497845993777, "Losses/train_all_core_loss": 12.847667748587472, "Trainer/where": 0.22428571428571428, "Trainer/epoch": 8, "Trainer/steps_train": 315}
+ {"Losses/train_all_loss": 11.098733384268625, "Losses/train_all_loss_mask": 0.007828620975903635, "Losses/train_all_loss_dice": 9.035280391148158, "Losses/train_all_loss_iou": 0.10779441692700077, "Losses/train_all_loss_class": 1.7990862643612282, "Losses/train_all_core_loss": 11.098733384268625, "Trainer/where": 0.24928571428571428, "Trainer/epoch": 9, "Trainer/steps_train": 350}
+ {"Losses/train_all_loss": 10.939446571895054, "Losses/train_all_loss_mask": 0.003415139138399224, "Losses/train_all_loss_dice": 9.397412940434046, "Losses/train_all_loss_iou": 0.050908478014337434, "Losses/train_all_loss_class": 1.4228219677827187, "Losses/train_all_core_loss": 10.939446571895054, "Trainer/where": 0.2742857142857143, "Trainer/epoch": 10, "Trainer/steps_train": 385}
+ {"Losses/train_all_loss": 11.260014452253069, "Losses/train_all_loss_mask": 0.0037810460206986005, "Losses/train_all_loss_dice": 9.496538509641375, "Losses/train_all_loss_iou": 0.06279493517223662, "Losses/train_all_loss_class": 1.6250600540411793, "Losses/train_all_core_loss": 11.260014452253069, "Trainer/where": 0.29928571428571427, "Trainer/epoch": 11, "Trainer/steps_train": 420}
+ {"Losses/train_all_loss": 12.587461267198835, "Losses/train_all_loss_mask": 0.005486930625712765, "Losses/train_all_loss_dice": 10.807248279026576, "Losses/train_all_loss_iou": 0.12015448743186426, "Losses/train_all_loss_class": 1.550319859199226, "Losses/train_all_core_loss": 12.587461267198835, "Trainer/where": 0.3242857142857143, "Trainer/epoch": 12, "Trainer/steps_train": 455}
+ {"Losses/train_all_loss": 12.622871085575648, "Losses/train_all_loss_mask": 0.0038172564951569907, "Losses/train_all_loss_dice": 11.162222140175956, "Losses/train_all_loss_iou": 0.05127683051479315, "Losses/train_all_loss_class": 1.3330271471424826, "Losses/train_all_core_loss": 12.622871085575648, "Trainer/where": 0.3492857142857143, "Trainer/epoch": 13, "Trainer/steps_train": 490}
+ {"Losses/train_all_loss": 10.91449010031564, "Losses/train_all_loss_mask": 0.0021623846517676223, "Losses/train_all_loss_dice": 9.109422874450683, "Losses/train_all_loss_iou": 0.03527758292870463, "Losses/train_all_loss_class": 1.7265419553599453, "Losses/train_all_core_loss": 10.91449010031564, "Trainer/where": 0.3742857142857143, "Trainer/epoch": 14, "Trainer/steps_train": 525}
+ {"Losses/train_all_loss": 12.423973451341901, "Losses/train_all_loss_mask": 0.002862129371247387, "Losses/train_all_loss_dice": 10.686428315298897, "Losses/train_all_loss_iou": 0.07063553876796505, "Losses/train_all_loss_class": 1.6096669961332477, "Losses/train_all_core_loss": 12.423973451341901, "Trainer/where": 0.3992857142857143, "Trainer/epoch": 15, "Trainer/steps_train": 560}
+ {"Losses/train_all_loss": 13.769169889177595, "Losses/train_all_loss_mask": 0.004980977749385472, "Losses/train_all_loss_dice": 12.173356628417968, "Losses/train_all_loss_iou": 0.042235918748857716, "Losses/train_all_loss_class": 1.4539576956204006, "Losses/train_all_core_loss": 13.769169889177595, "Trainer/where": 0.42428571428571427, "Trainer/epoch": 16, "Trainer/steps_train": 595}
+ {"Losses/train_all_loss": 11.263181223188127, "Losses/train_all_loss_mask": 0.0019078197024230447, "Losses/train_all_loss_dice": 9.721550485066006, "Losses/train_all_loss_iou": 0.0294029886584862, "Losses/train_all_loss_class": 1.4740716303233057, "Losses/train_all_core_loss": 11.263181223188127, "Trainer/where": 0.4492857142857143, "Trainer/epoch": 17, "Trainer/steps_train": 630}
+ {"Losses/train_all_loss": 12.981634317125593, "Losses/train_all_loss_mask": 0.010148391163342499, "Losses/train_all_loss_dice": 11.691525718144009, "Losses/train_all_loss_iou": 0.06949597106098995, "Losses/train_all_loss_class": 1.0176445866269725, "Losses/train_all_core_loss": 12.981634317125593, "Trainer/where": 0.4742857142857143, "Trainer/epoch": 18, "Trainer/steps_train": 665}
+ {"Losses/train_all_loss": 11.267893355233328, "Losses/train_all_loss_mask": 0.0026320721621492077, "Losses/train_all_loss_dice": 9.648453426361083, "Losses/train_all_loss_iou": 0.06427928070271653, "Losses/train_all_loss_class": 1.502519281820527, "Losses/train_all_core_loss": 11.267893355233328, "Trainer/where": 0.4992857142857143, "Trainer/epoch": 19, "Trainer/steps_train": 700}
+ {"Losses/train_all_loss": 12.060971723284041, "Losses/train_all_loss_mask": 0.004294625139196536, "Losses/train_all_loss_dice": 10.43238205228533, "Losses/train_all_loss_iou": 0.0722124845030651, "Losses/train_all_loss_class": 1.470484510040842, "Losses/train_all_core_loss": 12.060971723284041, "Trainer/where": 0.5242857142857142, "Trainer/epoch": 20, "Trainer/steps_train": 735}
+ {"Losses/train_all_loss": 10.979690497262137, "Losses/train_all_loss_mask": 0.002396076506036999, "Losses/train_all_loss_dice": 9.051968785694667, "Losses/train_all_loss_iou": 0.03635571563832595, "Losses/train_all_loss_class": 1.8434445864254876, "Losses/train_all_core_loss": 10.979690497262137, "Trainer/where": 0.5492857142857143, "Trainer/epoch": 21, "Trainer/steps_train": 770}
+ {"Losses/train_all_loss": 10.912646198272705, "Losses/train_all_loss_mask": 0.002020072060570653, "Losses/train_all_loss_dice": 9.306321804864066, "Losses/train_all_loss_iou": 0.03728623108545435, "Losses/train_all_loss_class": 1.5286366003632013, "Losses/train_all_core_loss": 10.912646198272705, "Trainer/where": 0.5742857142857143, "Trainer/epoch": 22, "Trainer/steps_train": 805}
+ {"Losses/train_all_loss": 12.972393226623534, "Losses/train_all_loss_mask": 0.0029303550138138233, "Losses/train_all_loss_dice": 11.663762542179652, "Losses/train_all_loss_iou": 0.03950240197342022, "Losses/train_all_loss_class": 1.210520985183705, "Losses/train_all_core_loss": 12.972393226623534, "Trainer/where": 0.5992857142857143, "Trainer/epoch": 23, "Trainer/steps_train": 840}
+ {"Losses/train_all_loss": 11.836965492793492, "Losses/train_all_loss_mask": 0.002555759671875941, "Losses/train_all_loss_dice": 9.82940263066973, "Losses/train_all_loss_iou": 0.06007927144362059, "Losses/train_all_loss_class": 1.8963684562028253, "Losses/train_all_core_loss": 11.836965492793492, "Trainer/where": 0.6242857142857143, "Trainer/epoch": 24, "Trainer/steps_train": 875}
+ {"Losses/train_all_loss": 12.333917222704207, "Losses/train_all_loss_mask": 0.005893107972640012, "Losses/train_all_loss_dice": 10.638660866873606, "Losses/train_all_loss_iou": 0.1051822858673404, "Losses/train_all_loss_class": 1.4722119661713284, "Losses/train_all_core_loss": 12.333917222704207, "Trainer/where": 0.6492857142857142, "Trainer/epoch": 25, "Trainer/steps_train": 910}
+ {"Losses/train_all_loss": 12.976327841622489, "Losses/train_all_loss_mask": 0.0058499143504637426, "Losses/train_all_loss_dice": 11.136853994641985, "Losses/train_all_loss_iou": 0.07329373683772117, "Losses/train_all_loss_class": 1.6491819693573884, "Losses/train_all_core_loss": 12.976327841622489, "Trainer/where": 0.6742857142857143, "Trainer/epoch": 26, "Trainer/steps_train": 945}
+ {"Losses/train_all_loss": 12.715955025809151, "Losses/train_all_loss_mask": 0.0051494090393784326, "Losses/train_all_loss_dice": 11.038715130942208, "Losses/train_all_loss_iou": 0.07952312049164903, "Losses/train_all_loss_class": 1.494728443427344, "Losses/train_all_core_loss": 12.715955025809151, "Trainer/where": 0.6992857142857143, "Trainer/epoch": 27, "Trainer/steps_train": 980}
+ {"Losses/train_all_loss": 12.254196275983539, "Losses/train_all_loss_mask": 0.002468374198568719, "Losses/train_all_loss_dice": 10.371674939564295, "Losses/train_all_loss_iou": 0.035466401658517756, "Losses/train_all_loss_class": 1.7976875406590158, "Losses/train_all_core_loss": 12.254196275983539, "Trainer/where": 0.7242857142857143, "Trainer/epoch": 28, "Trainer/steps_train": 1015}
+ {"Losses/train_all_loss": 11.89058301108224, "Losses/train_all_loss_mask": 0.0033038316294550897, "Losses/train_all_loss_dice": 10.133634703499931, "Losses/train_all_loss_iou": 0.06524576840498152, "Losses/train_all_loss_class": 1.6256259542756848, "Losses/train_all_core_loss": 11.89058301108224, "Trainer/where": 0.7492857142857143, "Trainer/epoch": 29, "Trainer/steps_train": 1050}
+ {"Losses/train_all_loss": 12.994428975241524, "Losses/train_all_loss_mask": 0.0032894188288732297, "Losses/train_all_loss_dice": 11.455551188332693, "Losses/train_all_loss_iou": 0.05269411236804444, "Losses/train_all_loss_class": 1.4203952280871037, "Losses/train_all_core_loss": 12.994428975241524, "Trainer/where": 0.7742857142857142, "Trainer/epoch": 30, "Trainer/steps_train": 1085}
+ {"Losses/train_all_loss": 11.028971740177699, "Losses/train_all_loss_mask": 0.0017663538056824888, "Losses/train_all_loss_dice": 9.26269657952445, "Losses/train_all_loss_iou": 0.020907437572480245, "Losses/train_all_loss_class": 1.7100406129179255, "Losses/train_all_core_loss": 11.028971740177699, "Trainer/where": 0.7992857142857143, "Trainer/epoch": 31, "Trainer/steps_train": 1120}
+ {"Losses/train_all_loss": 12.77100260598319, "Losses/train_all_loss_mask": 0.005347782664466649, "Losses/train_all_loss_dice": 11.073454679761614, "Losses/train_all_loss_iou": 0.1097778601632204, "Losses/train_all_loss_class": 1.480814137798734, "Losses/train_all_core_loss": 12.77100260598319, "Trainer/where": 0.8242857142857142, "Trainer/epoch": 32, "Trainer/steps_train": 1155}
+ {"Losses/train_all_loss": 12.876578099387032, "Losses/train_all_loss_mask": 0.014166244934313, "Losses/train_all_loss_dice": 10.838990374973841, "Losses/train_all_loss_iou": 0.09881567690504849, "Losses/train_all_loss_class": 1.6554468802575555, "Losses/train_all_core_loss": 12.876578099387032, "Trainer/where": 0.8492857142857142, "Trainer/epoch": 33, "Trainer/steps_train": 1190}
+ {"Losses/train_all_loss": 10.704918697902135, "Losses/train_all_loss_mask": 0.0019853250996675342, "Losses/train_all_loss_dice": 8.530561372212, "Losses/train_all_loss_iou": 0.022953493090088678, "Losses/train_all_loss_class": 2.1116973195324786, "Losses/train_all_core_loss": 10.704918697902135, "Trainer/where": 0.8742857142857142, "Trainer/epoch": 34, "Trainer/steps_train": 1225}
+ {"Losses/train_all_loss": 12.674189213344029, "Losses/train_all_loss_mask": 0.004104748995242906, "Losses/train_all_loss_dice": 11.569254003252302, "Losses/train_all_loss_iou": 0.013460466006654315, "Losses/train_all_loss_class": 1.0093798162881285, "Losses/train_all_core_loss": 12.674189213344029, "Trainer/where": 0.8992857142857142, "Trainer/epoch": 35, "Trainer/steps_train": 1260}
+ {"Losses/train_all_loss": 13.152547141483852, "Losses/train_all_loss_mask": 0.00731085481404859, "Losses/train_all_loss_dice": 11.433105080468314, "Losses/train_all_loss_iou": 0.04558718047472732, "Losses/train_all_loss_class": 1.5276377756015531, "Losses/train_all_core_loss": 13.152547141483852, "Trainer/where": 0.9242857142857142, "Trainer/epoch": 36, "Trainer/steps_train": 1295}
+ {"Losses/train_all_loss": 10.936215046473912, "Losses/train_all_loss_mask": 0.00343067699328198, "Losses/train_all_loss_dice": 9.400099325180054, "Losses/train_all_loss_iou": 0.0718443956205322, "Losses/train_all_loss_class": 1.3956576879734972, "Losses/train_all_core_loss": 10.936215046473912, "Trainer/where": 0.9492857142857142, "Trainer/epoch": 37, "Trainer/steps_train": 1330}
+ {"Losses/train_all_loss": 13.038009902409145, "Losses/train_all_loss_mask": 0.001936620579467022, "Losses/train_all_loss_dice": 11.594574492318289, "Losses/train_all_loss_iou": 0.04597411672626289, "Losses/train_all_loss_class": 1.3587288303616722, "Losses/train_all_core_loss": 13.038009902409145, "Trainer/where": 0.9742857142857142, "Trainer/epoch": 38, "Trainer/steps_train": 1365}
+ {"Losses/train_all_loss": 12.413451303754535, "Losses/train_all_loss_mask": 0.003644983982667327, "Losses/train_all_loss_dice": 10.79392305782863, "Losses/train_all_loss_iou": 0.06104640555193847, "Losses/train_all_loss_class": 1.4855821575769887, "Losses/train_all_core_loss": 12.413451303754535, "Trainer/where": 0.9992857142857142, "Trainer/epoch": 39, "Trainer/steps_train": 1400}
tensorboard/events.out.tfevents.1736037893.creative-turing-2.3136573.0928bfd69-289f-40fa-b70d-977964f79bac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef16ba09a2d7d28dfd71fc34ed86c95781121864f19f799501a5debbd8b511e
+ size 88
tensorboard/events.out.tfevents.1736039984.creative-turing-2.3143541.09b0a183e-0877-483b-8228-cdb52183d13b ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0ed72dea64be1d50847af3b7740ffc58bc67e2de16e0e4ef126913cf2db0c60
+ size 88
tensorboard/events.out.tfevents.1736040155.creative-turing-2.3145589.0dbed95eb-8481-4404-acdf-20879f1218f7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35f0bf01d7b2106b174906520aaad4b9c0b8e5630c38175e48063e2ad53abb28
+ size 665159
tensorboard/events.out.tfevents.1737390155.creative-turing-2.1898139.03b45d984-ad8b-4ade-ab73-ec2387852ef3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab05c602949487bfcfa2855872c34da74e117a8562a81d33960ed09e1a8d1785
+ size 88
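The four `tensorboard/events.out.tfevents.*` entries are also LFS pointers (three are 88-byte stubs; one holds the ~665 kB of logged scalars). Once fetched, they can be read back without launching TensorBoard; a minimal sketch using TensorBoard's event accumulator (the scalar tag names are not recorded in this diff, so the snippet simply lists whatever is present):

```python
# Sketch: enumerate scalar tags logged in the event files under tensorboard/.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("tensorboard")  # point at the directory uploaded above
acc.Reload()
for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    print(tag, len(events), "points, last value:", events[-1].value)
```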