---
language:
- en
license: apache-2.0
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:36
- loss:MatryoshkaLoss
- loss:MultipleNegativesRankingLoss
base_model: Snowflake/snowflake-arctic-embed-m-v1.5
widget:
- source_sentence: How can I connect a GCP Image Builder to resources using ZenML?
  sentences:
  - "_run.steps[step_name]\n    whylogs_step.visualize()if __name__ == \"__main__\"\
    :\n    visualize_statistics(\"data_loader\")\n    visualize_statistics(\"train_data_profiler\"\
    , \"test_data_profiler\")\n\nPreviousEvidentlyNextDevelop a custom data validator\n\
    \nLast updated 1 month ago"
  - 'Implement a custom integration


    Creating an external integration and contributing to ZenML


    PreviousContribute to ZenMLNextOverview


    Last updated 4 months ago'
  - "--connector <CONNECTOR_ID>\n\nExample Command Output$ zenml image-builder connect\
    \ gcp-image-builder --connector gcp-generic\nSuccessfully connected image builder\
    \ `gcp-image-builder` to the following resources:\n┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━┓\n\
    ┃             CONNECTOR ID             │ CONNECTOR NAME │ CONNECTOR TYPE │ RESOURCE\
    \ TYPE  │ RESOURCE NAMES ┃\n┠──────────────────────────────────────┼────────────────┼────────────────┼────────────────┼────────────────┨\n\
    ┃ bfdb657d-d808-47e7-9974-9ba6e4919d83 │ gcp-generic    │ \U0001F535 gcp     \
    \    │ \U0001F535 gcp-generic │ zenml-core     ┃\n┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━┛\n\
    \nAs a final step, you can use the GCP Image Builder in a ZenML Stack:\n\n# Register\
    \ and set a stack with the new image builder\nzenml stack register <STACK_NAME>\
    \ -i <IMAGE_BUILDER_NAME> ... --set\n\nWhen you register the GCP Image Builder,\
    \ you can generate a GCP Service Account Key, save it to a local file and then\
    \ reference it in the Image Builder configuration.\n\nThis method has the advantage\
    \ that you don't need to install and configure the GCP CLI on your host, but it's\
    \ still not as secure as using a GCP Service Connector and the stack component\
    \ configuration is not portable to other hosts.\n\nFor this method, you need to\
    \ create a user-managed GCP service account, and grant it privileges to access\
    \ the Cloud Build API and to run Cloud Builder jobs (e.g. the Cloud Build Editor\
    \ IAM role.\n\nWith the service account key downloaded to a local file, you can\
    \ register the GCP Image Builder as follows:\n\nzenml image-builder register <IMAGE_BUILDER_NAME>\
    \ \\\n    --flavor=gcp \\\n    --project=<GCP_PROJECT_ID> \\\n    --service_account_path=<PATH_TO_SERVICE_ACCOUNT_KEY>\
    \ \\\n    --cloud_builder_image=<BUILDER_IMAGE_NAME> \\\n    --network=<DOCKER_NETWORK>\
    \ \\\n    --build_timeout=<BUILD_TIMEOUT_IN_SECONDS>"
- source_sentence: How do I register and activate a ZenML stack with a new GCP Image
    Builder while ensuring proper authentication?
  sentences:
  - "oad the returned whylogs profile to WhyLabs, e.g.:import pandas as pd\nfrom whylogs.core\
    \ import DatasetProfileView\nimport whylogs as why\nfrom zenml import step\nfrom\
    \ zenml.integrations.whylogs.flavors.whylogs_data_validator_flavor import (\n\
    \    WhylogsDataValidatorSettings,\n)\n\nwhylogs_settings = WhylogsDataValidatorSettings(\n\
    \    enable_whylabs=True, dataset_id=\"<WHYLABS_DATASET_ID>\"\n)\n\n@step(\n \
    \   settings={\n        \"data_validator\": whylogs_settings\n    }\n)\ndef data_profiler(\n\
    \        dataset: pd.DataFrame,\n) -> DatasetProfileView:\n    \"\"\"Custom data\
    \ profiler step with whylogs\n\nArgs:\n        dataset: a Pandas DataFrame\n\n\
    Returns:\n        Whylogs Profile generated for the dataset\n    \"\"\"\n\n# validation\
    \ pre-processing (e.g. dataset preparation) can take place here\n\nresults = why.log(dataset)\n\
    \    profile = results.profile()\n\n# validation post-processing (e.g. interpret\
    \ results, take actions) can happen here\n\nreturn profile.view()\n\nVisualizing\
    \ whylogs Profiles\n\nYou can view visualizations of the whylogs profiles generated\
    \ by your pipeline steps directly in the ZenML dashboard by clicking on the respective\
    \ artifact in the pipeline run DAG.\n\nAlternatively, if you are running inside\
    \ a Jupyter notebook, you can load and render the whylogs profiles using the artifact.visualize()\
    \ method, e.g.:\n\nfrom zenml.client import Client\n\ndef visualize_statistics(\n\
    \    step_name: str, reference_step_name: Optional[str] = None\n) -> None:\n \
    \   \"\"\"Helper function to visualize whylogs statistics from step artifacts.\n\
    \nArgs:\n        step_name: step that generated and returned a whylogs profile\n\
    \        reference_step_name: an optional second step that generated a whylogs\n\
    \            profile to use for data drift visualization where two whylogs\n \
    \           profiles are required.\n    \"\"\"\n    pipe = Client().get_pipeline(pipeline=\"\
    data_profiling_pipeline\")\n    whylogs_step = pipe.last_run.steps[step_name]\n\
    \    whylogs_step.visualize()"
  - "ogsDataValidatorSettings,\n)\nfrom zenml import step@step(\n    settings={\n\
    \        \"data_validator\": WhylogsDataValidatorSettings(\n            enable_whylabs=True,\
    \ dataset_id=\"model-1\"\n        )\n    }\n)\ndef data_loader() -> Tuple[\n \
    \   Annotated[pd.DataFrame, \"data\"],\n    Annotated[DatasetProfileView, \"profile\"\
    ]\n]:\n    \"\"\"Load the diabetes dataset.\"\"\"\n    X, y = datasets.load_diabetes(return_X_y=True,\
    \ as_frame=True)\n\n# merge X and y together\n    df = pd.merge(X, y, left_index=True,\
    \ right_index=True)\n\nprofile = why.log(pandas=df).profile().view()\n    return\
    \ df, profile\n\nHow do you use it?\n\nWhylogs's profiling functions take in a\
    \ pandas.DataFrame dataset generate a DatasetProfileView object containing all\
    \ the relevant information extracted from the dataset.\n\nThere are three ways\
    \ you can use whylogs in your ZenML pipelines that allow different levels of flexibility:\n\
    \ninstantiate, configure and insert the standard WhylogsProfilerStep shipped with\
    \ ZenML into your pipelines. This is the easiest way and the recommended approach,\
    \ but can only be customized through the supported step configuration parameters.\n\
    \ncall the data validation methods provided by the whylogs Data Validator in your\
    \ custom step implementation. This method allows for more flexibility concerning\
    \ what can happen in the pipeline step, but you are still limited to the functionality\
    \ implemented in the Data Validator.\n\nuse the whylogs library directly in your\
    \ custom step implementation. This gives you complete freedom in how you are using\
    \ whylogs's features.\n\nYou can visualize whylogs profiles in Jupyter notebooks\
    \ or view them directly in the ZenML dashboard.\n\nThe whylogs standard step"
  - " build to finish. More information: Build Timeout.We can register the image builder\
    \ and use it in our active stack:\n\nzenml image-builder register <IMAGE_BUILDER_NAME>\
    \ \\\n    --flavor=gcp \\\n    --cloud_builder_image=<BUILDER_IMAGE_NAME> \\\n\
    \    --network=<DOCKER_NETWORK> \\\n    --build_timeout=<BUILD_TIMEOUT_IN_SECONDS>\n\
    \n# Register and activate a stack with the new image builder\nzenml stack register\
    \ <STACK_NAME> -i <IMAGE_BUILDER_NAME> ... --set\n\nYou also need to set up authentication\
    \ required to access the Cloud Build GCP services.\n\nAuthentication Methods\n\
    \nIntegrating and using a GCP Image Builder in your pipelines is not possible\
    \ without employing some form of authentication. If you're looking for a quick\
    \ way to get started locally, you can use the Local Authentication method. However,\
    \ the recommended way to authenticate to the GCP cloud platform is through a GCP\
    \ Service Connector. This is particularly useful if you are configuring ZenML\
    \ stacks that combine the GCP Image Builder with other remote stack components\
    \ also running in GCP.\n\nThis method uses the implicit GCP authentication available\
    \ in the environment where the ZenML code is running. On your local machine, this\
    \ is the quickest way to configure a GCP Image Builder. You don't need to supply\
    \ credentials explicitly when you register the GCP Image Builder, as it leverages\
    \ the local credentials and configuration that the Google Cloud CLI stores on\
    \ your local machine. However, you will need to install and set up the Google\
    \ Cloud CLI on your machine as a prerequisite, as covered in the Google Cloud\
    \ documentation , before you register the GCP Image Builder.\n\nStacks using the\
    \ GCP Image Builder set up with local authentication are not portable across environments.\
    \ To make ZenML pipelines fully portable, it is recommended to use a GCP Service\
    \ Connector to authenticate your GCP Image Builder to the GCP cloud platform."
- source_sentence: How can I register and set a stack with a new image builder using
    ZenML?
  sentences:
  - 'ZenML - Bridging the gap between ML & Ops


    Legacy Docs


    Bleeding EdgeLegacy Docs0.67.0


    🧙‍♂️Find older version our docs


    Powered by GitBook'
  - "> \\\n    --build_timeout=<BUILD_TIMEOUT_IN_SECONDS># Register and set a stack\
    \ with the new image builder\nzenml stack register <STACK_NAME> -i <IMAGE_BUILDER_NAME>\
    \ ... --set\n\nCaveats\n\nAs described in this Google Cloud Build documentation\
    \ page, Google Cloud Build uses containers to execute the build steps which are\
    \ automatically attached to a network called cloudbuild that provides some Application\
    \ Default Credentials (ADC), that allow the container to be authenticated and\
    \ therefore use other GCP services.\n\nBy default, the GCP Image Builder is executing\
    \ the build command of the ZenML Pipeline Docker image with the option --network=cloudbuild,\
    \ so the ADC provided by the cloudbuild network can also be used in the build.\
    \ This is useful if you want to install a private dependency from a GCP Artifact\
    \ Registry, but you will also need to use a custom base parent image with the\
    \ keyrings.google-artifactregistry-auth installed, so pip can connect and authenticate\
    \ in the private artifact registry to download the dependency.\n\nFROM zenmldocker/zenml:latest\n\
    \nRUN pip install keyrings.google-artifactregistry-auth\n\nThe above Dockerfile\
    \ uses zenmldocker/zenml:latest as a base image, but is recommended to change\
    \ the tag to specify the ZenML version and Python version like 0.33.0-py3.10.\n\
    \nPreviousKaniko Image BuilderNextDevelop a Custom Image Builder\n\nLast updated\
    \ 21 days ago"
  - "res Spark to handle the resource configuration.\"\"\"def _backend_configuration(\n\
    \            self,\n            spark_config: SparkConf,\n            step_config:\
    \ \"StepConfiguration\",\n    ) -> None:\n        \"\"\"Configures Spark to handle\
    \ backends like YARN, Mesos or Kubernetes.\"\"\"\n\ndef _io_configuration(\n \
    \           self,\n            spark_config: SparkConf\n    ) -> None:\n     \
    \   \"\"\"Configures Spark to handle different input/output sources.\"\"\"\n\n\
    def _additional_configuration(\n            self,\n            spark_config: SparkConf\n\
    \    ) -> None:\n        \"\"\"Appends the user-defined configuration parameters.\"\
    \"\"\n\ndef _launch_spark_job(\n            self,\n            spark_config: SparkConf,\n\
    \            entrypoint_command: List[str]\n    ) -> None:\n        \"\"\"Generates\
    \ and executes a spark-submit command.\"\"\"\n\ndef launch(\n            self,\n\
    \            info: \"StepRunInfo\",\n            entrypoint_command: List[str],\n\
    \    ) -> None:\n        \"\"\"Launches the step on Spark.\"\"\"\n\nUnder the\
    \ base configuration, you will see the main configuration parameters:\n\nmaster\
    \ is the master URL for the cluster where Spark will run. You might see different\
    \ schemes for this URL with varying cluster managers such as Mesos, YARN, or Kubernetes.\n\
    \ndeploy_mode can either be 'cluster' (default) or 'client' and it decides where\
    \ the driver node of the application will run.\n\nsubmit_args is the JSON string\
    \ of a dictionary, which will be used to define additional parameters if required\
    \ ( Spark has a wide variety of parameters, thus including them all in a single\
    \ class was deemed unnecessary.).\n\nIn addition to this configuration, the launch\
    \ method of the step operator gets additional configuration parameters from the\
    \ DockerSettings and ResourceSettings. As a result, the overall configuration\
    \ happens in 4 base methods:\n\n_resource_configuration translates the ZenML ResourceSettings\
    \ object to Spark's own resource configuration.\n\n_backend_configuration is responsible\
    \ for cluster-manager-specific configuration."
- source_sentence: How can I install ZenML with support for a local dashboard, and
    what precautions should I take when installing on a Mac with Apple Silicon?
  sentences:
  - ' visit our PyPi package page.


    Running with Dockerzenml is also available as a Docker image hosted publicly on
    DockerHub. Use the following command to get started in a bash environment with
    zenml available:


    docker run -it zenmldocker/zenml /bin/bash


    If you would like to run the ZenML server with Docker:


    docker run -it -d -p 8080:8080 zenmldocker/zenml-server


    Deploying the server


    Though ZenML can run entirely as a pip package on a local system, complete with
    the dashboard. You can do this easily:


    pip install "zenml[server]"

    zenml up  # opens the dashboard locally


    However, advanced ZenML features are dependent on a centrally-deployed ZenML server
    accessible to other MLOps stack components. You can read more about it here.


    For the deployment of ZenML, you have the option to either self-host it or register
    for a free ZenML Pro account.


    PreviousIntroductionNextCore concepts


    Last updated 20 days ago'
  - 'Evaluation and metrics


    Track how your RAG pipeline improves using evaluation and metrics.


    PreviousBasic RAG inference pipelineNextEvaluation in 65 lines of code


    Last updated 4 months ago'
  - '🧙Installation


    Installing ZenML and getting started.


    ZenML is a Python package that can be installed directly via pip:


    pip install zenml


    Note that ZenML currently supports Python 3.8, 3.9, 3.10, and 3.11. Please make
    sure that you are using a supported Python version.


    Install with the dashboard


    ZenML comes bundled with a web dashboard that lives inside a sister repository.
    In order to get access to the dashboard locally, you need to launch the ZenML
    Server and Dashboard locally. For this, you need to install the optional dependencies
    for the ZenML Server:


    pip install "zenml[server]"


    We highly encourage you to install ZenML in a virtual environment. At ZenML, We
    like to use virtualenvwrapper or pyenv-virtualenv to manage our Python virtual
    environments.


    Installing onto MacOS with Apple Silicon (M1, M2)


    A change in how forking works on Macs running on Apple Silicon means that you
    should set the following environment variable which will ensure that your connections
    to the server remain unbroken:


    export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES


    You can read more about this here. This environment variable is needed if you
    are working with a local server on your Mac, but if you''re just using ZenML as
    a client / CLI and connecting to a deployed server then you don''t need to set
    it.


    Nightly builds


    ZenML also publishes nightly builds under the zenml-nightly package name. These
    are built from the latest develop branch (to which work ready for release is published)
    and are not guaranteed to be stable. To install the nightly build, run:


    pip install zenml-nightly


    Verifying installations


    Once the installation is completed, you can check whether the installation was
    successful either through Bash:


    zenml version


    or through Python:


    import zenml


    print(zenml.__version__)


    If you would like to learn more about the current release, please visit our PyPi
    package page.


    Running with Docker'
- source_sentence: How does the KubernetesSparkStepOperator utilize the PipelineDockerImageBuilder
    class to manage Docker images for Spark jobs on Kubernetes?
  sentences:
  - 'ZenML - Bridging the gap between ML & Ops


    Legacy Docs


    Bleeding EdgeLegacy Docs0.67.0


    🧙‍♂️Find older version our docs


    Powered by GitBook'
  - "nsible for cluster-manager-specific configuration._io_configuration is a critical\
    \ method. Even though we have materializers, Spark might require additional packages\
    \ and configuration to work with a specific filesystem. This method is used as\
    \ an interface to provide this configuration.\n\n_additional_configuration takes\
    \ the submit_args, converts, and appends them to the overall configuration.\n\n\
    Once the configuration is completed, _launch_spark_job comes into play. This takes\
    \ the completed configuration and runs a Spark job on the given master URL with\
    \ the specified deploy_mode. By default, this is achieved by creating and executing\
    \ a spark-submit command.\n\nWarning\n\nIn its first iteration, the pre-configuration\
    \ with _io_configuration method is only effective when it is paired with an S3ArtifactStore\
    \ (which has an authentication secret). When used with other artifact store flavors,\
    \ you might be required to provide additional configuration through the submit_args.\n\
    \nStack Component: KubernetesSparkStepOperator\n\nThe KubernetesSparkStepOperator\
    \ is implemented by subclassing the base SparkStepOperator and uses the PipelineDockerImageBuilder\
    \ class to build and push the required Docker images.\n\nfrom typing import Optional\n\
    \nfrom zenml.integrations.spark.step_operators.spark_step_operator import (\n\
    \    SparkStepOperatorConfig\n)\n\nclass KubernetesSparkStepOperatorConfig(SparkStepOperatorConfig):\n\
    \    \"\"\"Config for the Kubernetes Spark step operator.\"\"\"\n\nnamespace:\
    \ Optional[str] = None\n    service_account: Optional[str] = None\n\nfrom pyspark.conf\
    \ import SparkConf\n\nfrom zenml.utils.pipeline_docker_image_builder import PipelineDockerImageBuilder\n\
    from zenml.integrations.spark.step_operators.spark_step_operator import (\n  \
    \  SparkStepOperator\n)\n\nclass KubernetesSparkStepOperator(SparkStepOperator):\n\
    \    \"\"\"Step operator which runs Steps with Spark on Kubernetes.\"\"\""
  - "ngs/python/Dockerfile -u 0 build\n\nConfiguring RBACAdditionally, you may need\
    \ to create the several resources in Kubernetes in order to give Spark access\
    \ to edit/manage your driver executor pods.\n\nTo do so, create a file called\
    \ rbac.yaml with the following content:\n\napiVersion: v1\nkind: Namespace\nmetadata:\n\
    \  name: spark-namespace\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n\
    \  name: spark-service-account\n  namespace: spark-namespace\n---\napiVersion:\
    \ rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: spark-role\n\
    \  namespace: spark-namespace\nsubjects:\n  - kind: ServiceAccount\n    name:\
    \ spark-service-account\n    namespace: spark-namespace\nroleRef:\n  kind: ClusterRole\n\
    \  name: edit\n  apiGroup: rbac.authorization.k8s.io\n---\n\nAnd then execute\
    \ the following command to create the resources:\n\naws eks --region=$REGION update-kubeconfig\
    \ --name=$EKS_CLUSTER_NAME\n\nkubectl create -f rbac.yaml\n\nLastly, note down\
    \ the namespace and the name of the service account since you will need them when\
    \ registering the stack component in the next step.\n\nHow to use it\n\nTo use\
    \ the KubernetesSparkStepOperator, you need:\n\nthe ZenML spark integration. If\
    \ you haven't installed it already, run\n\nzenml integration install spark\n\n\
    Docker installed and running.\n\nA remote artifact store as part of your stack.\n\
    \nA remote container registry as part of your stack.\n\nA Kubernetes cluster deployed.\n\
    \nWe can then register the step operator and use it in our active stack:\n\nzenml\
    \ step-operator register spark_step_operator \\\n\t--flavor=spark-kubernetes \\\
    \n\t--master=k8s://$EKS_API_SERVER_ENDPOINT \\\n\t--namespace=<SPARK_KUBERNETES_NAMESPACE>\
    \ \\\n\t--service_account=<SPARK_KUBERNETES_SERVICE_ACCOUNT>\n\n# Register the\
    \ stack\nzenml stack register spark_stack \\\n    -o default \\\n    -s spark_step_operator\
    \ \\\n    -a spark_artifact_store \\\n    -c spark_container_registry \\\n   \
    \ -i local_builder \\\n    --set"
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
model-index:
- name: zenml/finetuned-snowflake-arctic-embed-m-v1.5
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 384
      type: dim_384
    metrics:
    - type: cosine_accuracy@1
      value: 0.75
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 1.0
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 1.0
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.75
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.3333333333333333
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.2
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.1
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.75
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 1.0
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 1.0
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.875
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8333333333333334
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8333333333333334
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 256
      type: dim_256
    metrics:
    - type: cosine_accuracy@1
      value: 0.75
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.75
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 1.0
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.75
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.25
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.2
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.1
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.75
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.75
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 1.0
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.8576691395183482
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8125
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8125
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 128
      type: dim_128
    metrics:
    - type: cosine_accuracy@1
      value: 0.75
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.75
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 1.0
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.75
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.25
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.2
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.1
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.75
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.75
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 1.0
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.8576691395183482
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8125
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8125
      name: Cosine Map@100
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: dim 64
      type: dim_64
    metrics:
    - type: cosine_accuracy@1
      value: 0.75
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 1.0
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 1.0
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.75
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.3333333333333333
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.2
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.1
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.75
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 1.0
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 1.0
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.875
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8333333333333334
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8333333333333334
      name: Cosine Map@100
---

# zenml/finetuned-snowflake-arctic-embed-m-v1.5

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Snowflake/snowflake-arctic-embed-m-v1.5](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) on the json dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [Snowflake/snowflake-arctic-embed-m-v1.5](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) <!-- at revision 4d7418a980f09b897b7e08dcd981603eefde0e3f -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Dataset:**
    - json
- **Language:** en
- **License:** apache-2.0

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel 
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
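
The module stack above amounts to CLS-token pooling over a BERT encoder followed by L2 normalization. For reference, a minimal sketch of the equivalent computation with plain `transformers`, assuming the transformer weights load directly via `AutoModel` (the Sentence Transformers snippet in the Usage section below is the recommended path):

```python
import torch
from transformers import AutoTokenizer, AutoModel

model_id = "zenml/finetuned-snowflake-arctic-embed-m-v1.5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

texts = ["How do I install ZenML?"]
batch = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")

with torch.no_grad():
    outputs = model(**batch)

# CLS-token pooling (pooling_mode_cls_token=True), then L2 normalization (Normalize module)
embeddings = outputs.last_hidden_state[:, 0]
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
print(embeddings.shape)  # torch.Size([1, 768])
```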

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("zenml/finetuned-snowflake-arctic-embed-m-v1.5")
# Run inference
sentences = [
    'How does the KubernetesSparkStepOperator utilize the PipelineDockerImageBuilder class to manage Docker images for Spark jobs on Kubernetes?',
    'nsible for cluster-manager-specific configuration._io_configuration is a critical method. Even though we have materializers, Spark might require additional packages and configuration to work with a specific filesystem. This method is used as an interface to provide this configuration.\n\n_additional_configuration takes the submit_args, converts, and appends them to the overall configuration.\n\nOnce the configuration is completed, _launch_spark_job comes into play. This takes the completed configuration and runs a Spark job on the given master URL with the specified deploy_mode. By default, this is achieved by creating and executing a spark-submit command.\n\nWarning\n\nIn its first iteration, the pre-configuration with _io_configuration method is only effective when it is paired with an S3ArtifactStore (which has an authentication secret). When used with other artifact store flavors, you might be required to provide additional configuration through the submit_args.\n\nStack Component: KubernetesSparkStepOperator\n\nThe KubernetesSparkStepOperator is implemented by subclassing the base SparkStepOperator and uses the PipelineDockerImageBuilder class to build and push the required Docker images.\n\nfrom typing import Optional\n\nfrom zenml.integrations.spark.step_operators.spark_step_operator import (\n    SparkStepOperatorConfig\n)\n\nclass KubernetesSparkStepOperatorConfig(SparkStepOperatorConfig):\n    """Config for the Kubernetes Spark step operator."""\n\nnamespace: Optional[str] = None\n    service_account: Optional[str] = None\n\nfrom pyspark.conf import SparkConf\n\nfrom zenml.utils.pipeline_docker_image_builder import PipelineDockerImageBuilder\nfrom zenml.integrations.spark.step_operators.spark_step_operator import (\n    SparkStepOperator\n)\n\nclass KubernetesSparkStepOperator(SparkStepOperator):\n    """Step operator which runs Steps with Spark on Kubernetes."""',
    "ngs/python/Dockerfile -u 0 build\n\nConfiguring RBACAdditionally, you may need to create the several resources in Kubernetes in order to give Spark access to edit/manage your driver executor pods.\n\nTo do so, create a file called rbac.yaml with the following content:\n\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: spark-namespace\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: spark-service-account\n  namespace: spark-namespace\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: spark-role\n  namespace: spark-namespace\nsubjects:\n  - kind: ServiceAccount\n    name: spark-service-account\n    namespace: spark-namespace\nroleRef:\n  kind: ClusterRole\n  name: edit\n  apiGroup: rbac.authorization.k8s.io\n---\n\nAnd then execute the following command to create the resources:\n\naws eks --region=$REGION update-kubeconfig --name=$EKS_CLUSTER_NAME\n\nkubectl create -f rbac.yaml\n\nLastly, note down the namespace and the name of the service account since you will need them when registering the stack component in the next step.\n\nHow to use it\n\nTo use the KubernetesSparkStepOperator, you need:\n\nthe ZenML spark integration. If you haven't installed it already, run\n\nzenml integration install spark\n\nDocker installed and running.\n\nA remote artifact store as part of your stack.\n\nA remote container registry as part of your stack.\n\nA Kubernetes cluster deployed.\n\nWe can then register the step operator and use it in our active stack:\n\nzenml step-operator register spark_step_operator \\\n\t--flavor=spark-kubernetes \\\n\t--master=k8s://$EKS_API_SERVER_ENDPOINT \\\n\t--namespace=<SPARK_KUBERNETES_NAMESPACE> \\\n\t--service_account=<SPARK_KUBERNETES_SERVICE_ACCOUNT>\n\n# Register the stack\nzenml stack register spark_stack \\\n    -o default \\\n    -s spark_step_operator \\\n    -a spark_artifact_store \\\n    -c spark_container_registry \\\n    -i local_builder \\\n    --set",
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
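
Because the model was trained with MatryoshkaLoss at dimensions 384, 256, 128 and 64, the embeddings can also be truncated to a smaller size with only a small drop in retrieval quality (see the evaluation table below). A minimal sketch using the `truncate_dim` option of Sentence Transformers; the query and documents are illustrative placeholders:

```python
from sentence_transformers import SentenceTransformer

# Load the model with embeddings truncated to 256 dimensions
model = SentenceTransformer("zenml/finetuned-snowflake-arctic-embed-m-v1.5", truncate_dim=256)

query = ["How can I install ZenML with the local dashboard?"]
docs = [
    'pip install "zenml[server]" installs ZenML together with the optional server and dashboard dependencies.',
    "The KubernetesSparkStepOperator runs Spark steps on a Kubernetes cluster.",
]

query_embedding = model.encode(query)
doc_embeddings = model.encode(docs)
print(doc_embeddings.shape)
# (2, 256)

# Rank the documents by cosine similarity to the query
scores = model.similarity(query_embedding, doc_embeddings)
print(scores.shape)
# [1, 2]
```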

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Information Retrieval

* Datasets: `dim_384`, `dim_256`, `dim_128` and `dim_64`
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | dim_384   | dim_256    | dim_128    | dim_64    |
|:--------------------|:----------|:-----------|:-----------|:----------|
| cosine_accuracy@1   | 0.75      | 0.75       | 0.75       | 0.75      |
| cosine_accuracy@3   | 1.0       | 0.75       | 0.75       | 1.0       |
| cosine_accuracy@5   | 1.0       | 1.0        | 1.0        | 1.0       |
| cosine_accuracy@10  | 1.0       | 1.0        | 1.0        | 1.0       |
| cosine_precision@1  | 0.75      | 0.75       | 0.75       | 0.75      |
| cosine_precision@3  | 0.3333    | 0.25       | 0.25       | 0.3333    |
| cosine_precision@5  | 0.2       | 0.2        | 0.2        | 0.2       |
| cosine_precision@10 | 0.1       | 0.1        | 0.1        | 0.1       |
| cosine_recall@1     | 0.75      | 0.75       | 0.75       | 0.75      |
| cosine_recall@3     | 1.0       | 0.75       | 0.75       | 1.0       |
| cosine_recall@5     | 1.0       | 1.0        | 1.0        | 1.0       |
| cosine_recall@10    | 1.0       | 1.0        | 1.0        | 1.0       |
| **cosine_ndcg@10**  | **0.875** | **0.8577** | **0.8577** | **0.875** |
| cosine_mrr@10       | 0.8333    | 0.8125     | 0.8125     | 0.8333    |
| cosine_map@100      | 0.8333    | 0.8125     | 0.8125     | 0.8333    |
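
The numbers above were produced with the `InformationRetrievalEvaluator` at each Matryoshka dimension. A minimal sketch of how a comparable evaluation can be run on your own held-out query/document pairs (the queries, corpus, and relevance mapping below are illustrative placeholders; in practice the corpus should contain more documents than the largest cutoff of 10):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator

# Illustrative toy data; replace with your own held-out queries and documents
queries = {"q1": "How do I install ZenML with the local dashboard?"}
corpus = {
    "d1": 'pip install "zenml[server]" installs ZenML with the server and dashboard dependencies.',
    "d2": "The KubernetesSparkStepOperator runs Spark steps on Kubernetes.",
    "d3": "ZenML stacks combine an orchestrator, artifact store and other components.",
}
relevant_docs = {"q1": {"d1"}}

# Evaluate at a truncated Matryoshka dimension, e.g. 256
model = SentenceTransformer("zenml/finetuned-snowflake-arctic-embed-m-v1.5", truncate_dim=256)
evaluator = InformationRetrievalEvaluator(
    queries=queries,
    corpus=corpus,
    relevant_docs=relevant_docs,
    name="dim_256",
)
results = evaluator(model)
print(results)  # dict of accuracy/precision/recall/NDCG/MRR/MAP values
```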

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### json

* Dataset: json
* Size: 36 training samples
* Columns: <code>positive</code> and <code>anchor</code>
* Approximate statistics based on the first 36 samples:
  |         | positive                                                                           | anchor                                                                               |
  |:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                               |
  | details | <ul><li>min: 13 tokens</li><li>mean: 23.11 tokens</li><li>max: 38 tokens</li></ul> | <ul><li>min: 31 tokens</li><li>mean: 299.64 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
  | positive                                                                                                                                                                        | anchor                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       |
  |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
  | <code>How does the ZenML BaseService registry manage serialization and re-creation of configurations for BaseService instances as part of the remote model server setup?</code> | <code>e details of the deployment process from the user.It needs to act as a ZenML BaseService registry, where every BaseService instance is used as an internal representation of a remote model server (see the find_model_server abstract method). To achieve this, it must be able to re-create the configuration of a BaseService from information that is persisted externally, alongside, or even as part of the remote model server configuration itself. For example, for model servers that are implemented as Kubernetes resources, the BaseService instances can be serialized and saved as Kubernetes resource annotations. This allows the model deployer to keep track of all externally running model servers and to re-create their corresponding BaseService instance representations at any given time. The model deployer also defines methods that implement basic life-cycle management on remote model servers outside the coverage of a pipeline (see stop_model_server , start_model_server and delete_model_server)....</code>                                                                                     |
  | <code>How do you ensure the MyExperimentTrackerFlavor is properly registered and available in ZenML?</code>                                                                     | <code>gister flavors.my_flavor.MyExperimentTrackerFlavorZenML resolves the flavor class by taking the path where you initialized zenml (via zenml init) as the starting point of resolution. Therefore, please ensure you follow the best practice of initializing zenml at the root of your repository.<br><br>If ZenML does not find an initialized ZenML repository in any parent directory, it will default to the current working directory, but usually, it's better to not have to rely on this mechanism and initialize zenml at the root.<br><br>Afterward, you should see the new flavor in the list of available flavors:<br><br>zenml experiment-tracker flavor list<br><br>It is important to draw attention to when and how these base abstractions are coming into play in a ZenML workflow.<br><br>The CustomExperimentTrackerFlavor class is imported and utilized upon the creation of the custom flavor through the CLI.<br><br>The CustomExperimentTrackerConfig class is imported when someone tries to register/update a stack component with this custom fl...</code>                                                 |
  | <code>How do you load and profile a dataset using the Whylogs data validator in ZenML?</code>                                                                                   | <code>ogsDataValidatorSettings,<br>)<br>from zenml import step@step(<br>    settings={<br>        "data_validator": WhylogsDataValidatorSettings(<br>            enable_whylabs=True, dataset_id="model-1"<br>        )<br>    }<br>)<br>def data_loader() -> Tuple[<br>    Annotated[pd.DataFrame, "data"],<br>    Annotated[DatasetProfileView, "profile"]<br>]:<br>    """Load the diabetes dataset."""<br>    X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)<br><br># merge X and y together<br>    df = pd.merge(X, y, left_index=True, right_index=True)<br><br>profile = why.log(pandas=df).profile().view()<br>    return df, profile<br><br>How do you use it?<br><br>Whylogs's profiling functions take in a pandas.DataFrame dataset generate a DatasetProfileView object containing all the relevant information extracted from the dataset.<br><br>There are three ways you can use whylogs in your ZenML pipelines that allow different levels of flexibility:<br><br>instantiate, configure and insert the standard WhylogsProfilerStep shipped with ZenML into your pipelines. This is the easiest ...</code> |
* Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:
  ```json
  {
      "loss": "MultipleNegativesRankingLoss",
      "matryoshka_dims": [
          384,
          256,
          128,
          64
      ],
      "matryoshka_weights": [
          1,
          1,
          1,
          1
      ],
      "n_dims_per_step": -1
  }
  ```
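
A minimal sketch of how this loss configuration is constructed with Sentence Transformers (the base model is taken from this model card; everything else is shown for illustration only):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss

model = SentenceTransformer("Snowflake/snowflake-arctic-embed-m-v1.5")

# MultipleNegativesRankingLoss over (anchor, positive) pairs, wrapped so the same loss
# is also applied to embeddings truncated to 384, 256, 128 and 64 dimensions
inner_loss = MultipleNegativesRankingLoss(model)
loss = MatryoshkaLoss(
    model,
    inner_loss,
    matryoshka_dims=[384, 256, 128, 64],
    matryoshka_weights=[1, 1, 1, 1],
)
```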

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: epoch
- `per_device_train_batch_size`: 4
- `per_device_eval_batch_size`: 16
- `gradient_accumulation_steps`: 16
- `learning_rate`: 2e-05
- `num_train_epochs`: 4
- `lr_scheduler_type`: cosine
- `warmup_ratio`: 0.1
- `tf32`: False
- `load_best_model_at_end`: True
- `optim`: adamw_torch_fused
- `batch_sampler`: no_duplicates
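
For reference, a minimal sketch of how these non-default settings map onto `SentenceTransformerTrainingArguments` (the output directory and the explicit `save_strategy`, which `load_best_model_at_end` requires to match `eval_strategy`, are assumptions for illustration):

```python
from sentence_transformers import SentenceTransformerTrainingArguments
from sentence_transformers.training_args import BatchSamplers

args = SentenceTransformerTrainingArguments(
    output_dir="finetuned-snowflake-arctic-embed-m-v1.5",  # hypothetical output path
    eval_strategy="epoch",
    save_strategy="epoch",  # assumption: must match eval_strategy when load_best_model_at_end=True
    per_device_train_batch_size=4,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=16,
    learning_rate=2e-5,
    num_train_epochs=4,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    tf32=False,
    load_best_model_at_end=True,
    optim="adamw_torch_fused",
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)
```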

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: epoch
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 4
- `per_device_eval_batch_size`: 16
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 16
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 4
- `max_steps`: -1
- `lr_scheduler_type`: cosine
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: False
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: True
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch_fused
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `eval_use_gather_object`: False
- `prompts`: None
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs
| Epoch   | Step  | dim_384_cosine_ndcg@10 | dim_256_cosine_ndcg@10 | dim_128_cosine_ndcg@10 | dim_64_cosine_ndcg@10 |
|:-------:|:-----:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|
| **1.0** | **1** | **0.875**              | **0.875**              | **0.8577**             | **0.875**             |
| 2.0     | 3     | 0.875                  | 0.8577                 | 0.8577                 | 0.875                 |
| 3.0     | 4     | 0.875                  | 0.8577                 | 0.8577                 | 0.875                 |

* The bold row denotes the saved checkpoint.

### Framework Versions
- Python: 3.11.11
- Sentence Transformers: 3.3.1
- Transformers: 4.43.1
- PyTorch: 2.5.1+cu124
- Accelerate: 1.2.0
- Datasets: 3.2.0
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MatryoshkaLoss
```bibtex
@misc{kusupati2024matryoshka,
    title={Matryoshka Representation Learning},
    author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},
    year={2024},
    eprint={2205.13147},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->