lvwerra HF staff committed on
Commit
6b8b1ef
·
1 Parent(s): 39692cc

add intro/tldr

Browse files
assets/images/placeholder.png ADDED

Git LFS Details

  • SHA256: c121166b2de694f4bb71dca004c8f413899016751eed5daa1570ba0d5ad9faec
  • Pointer size: 130 Bytes
  • Size of remote file: 53.1 kB
blog-export.html ADDED
The diff for this file is too large to render. See raw diff
 
blog-export.md ADDED
The diff for this file is too large to render. See raw diff
 
convert.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""Convert a Markdown file to HTML, writing FILE.html next to FILE.md."""
import sys
from pathlib import Path

import markdown


def convert_md_to_html(filepath):
    """Render the Markdown file at *filepath* to an HTML file with the same stem.

    The output path is the input path with its suffix replaced by ``.html``.
    Prints a status line on success; on any error (missing input, conversion
    failure, or an input that its own output would overwrite) prints a message
    and exits with status 1.
    """
    input_path = Path(filepath)
    output_path = input_path.with_suffix('.html')

    # Guard: for an input already named *.html, with_suffix('.html') is a
    # no-op, so writing output_path would silently clobber the source file.
    if output_path == input_path:
        print(f"Error: refusing to overwrite input file {input_path}")
        sys.exit(1)

    try:
        with open(input_path, 'r', encoding='utf-8') as md_file:
            text = md_file.read()
        html = markdown.markdown(text)

        # xmlcharrefreplace keeps the output writable even for characters the
        # target encoding cannot represent (emitted as &#...; references).
        with open(output_path, 'w', encoding='utf-8', errors='xmlcharrefreplace') as html_file:
            html_file.write(html)

        print(f"Converted {input_path} -> {output_path}")

    except FileNotFoundError:
        print(f"Error: Could not find file {input_path}")
        sys.exit(1)
    except Exception as e:  # best-effort CLI tool: report the failure and exit non-zero
        print(f"Error converting file: {e}")
        sys.exit(1)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: python convert.py FILEPATH.md")
        sys.exit(1)

    convert_md_to_html(sys.argv[1])
dist/assets/images/placeholder.png ADDED

Git LFS Details

  • SHA256: c121166b2de694f4bb71dca004c8f413899016751eed5daa1570ba0d5ad9faec
  • Pointer size: 130 Bytes
  • Size of remote file: 53.1 kB
dist/bibliography.bib CHANGED
@@ -331,4 +331,40 @@ url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
331
  eprint={2212.05129},
332
  archivePrefix={arXiv},
333
  primaryClass={cs.AI}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334
  }
 
331
  eprint={2212.05129},
332
  archivePrefix={arXiv},
333
  primaryClass={cs.AI}
334
+ }
335
+ @misc{kaplan2020scalinglaws,
336
+ title={Scaling Laws for Neural Language Models},
337
+ author={Jared Kaplan and Sam McCandlish and Tom Henighan and Tom B. Brown and Benjamin Chess and Rewon Child and Scott Gray and Alec Radford and Jeffrey Wu and Dario Amodei},
338
+ year={2020},
339
+ eprint={2001.08361},
340
+ archivePrefix={arXiv},
341
+ primaryClass={cs.LG},
342
+ url={https://arxiv.org/abs/2001.08361},
343
+ }
344
+ @misc{hoffmann2022chinchilla,
345
+ title={Training Compute-Optimal Large Language Models},
346
+ author={Jordan Hoffmann and Sebastian Borgeaud and Arthur Mensch and Elena Buchatskaya and Trevor Cai and Eliza Rutherford and Diego de Las Casas and Lisa Anne Hendricks and Johannes Welbl and Aidan Clark and Tom Hennigan and Eric Noland and Katie Millican and George van den Driessche and Bogdan Damoc and Aurelia Guy and Simon Osindero and Karen Simonyan and Erich Elsen and Jack W. Rae and Oriol Vinyals and Laurent Sifre},
347
+ year={2022},
348
+ eprint={2203.15556},
349
+ archivePrefix={arXiv},
350
+ primaryClass={cs.CL},
351
+ url={https://arxiv.org/abs/2203.15556},
352
+ }
353
+ @misc{grattafiori2024llama3herdmodels,
354
+ title={The Llama 3 Herd of Models},
355
+ author={Aaron Grattafiori and Abhimanyu Dubey and Abhinav Jauhri and Abhinav Pandey and Abhishek Kadian and Ahmad Al-Dahle and Aiesha Letman and Akhil Mathur and Alan Schelten and Alex Vaughan and Amy Yang and Angela Fan and Anirudh Goyal and Anthony Hartshorn and Aobo Yang and Archi Mitra and Archie Sravankumar and Artem Korenev and Arthur Hinsvark and Arun Rao and Aston Zhang and Aurelien Rodriguez and Austen Gregerson and Ava Spataru and Baptiste Roziere and Bethany Biron and Binh Tang and Bobbie Chern and Charlotte Caucheteux and Chaya Nayak and Chloe Bi and Chris Marra and Chris McConnell and Christian Keller and Christophe Touret and Chunyang Wu and Corinne Wong and Cristian Canton Ferrer and Cyrus Nikolaidis and Damien Allonsius and Daniel Song and Danielle Pintz and Danny Livshits and Danny Wyatt and David Esiobu and Dhruv Choudhary and Dhruv Mahajan and Diego Garcia-Olano and Diego Perino and Dieuwke Hupkes and Egor Lakomkin and Ehab AlBadawy and Elina Lobanova and Emily Dinan and Eric Michael Smith and Filip Radenovic and Francisco Guzmán and Frank Zhang and Gabriel Synnaeve and Gabrielle Lee and Georgia Lewis Anderson and Govind Thattai and Graeme Nail and Gregoire Mialon and Guan Pang and Guillem Cucurell and Hailey Nguyen and Hannah Korevaar and Hu Xu and Hugo Touvron and Iliyan Zarov and Imanol Arrieta Ibarra and Isabel Kloumann and Ishan Misra and Ivan Evtimov and Jack Zhang and Jade Copet and Jaewon Lee and Jan Geffert and Jana Vranes and Jason Park and Jay Mahadeokar and Jeet Shah and Jelmer van der Linde and Jennifer Billock and Jenny Hong and Jenya Lee and Jeremy Fu and Jianfeng Chi and Jianyu Huang and Jiawen Liu and Jie Wang and Jiecao Yu and Joanna Bitton and Joe Spisak and Jongsoo Park and Joseph Rocca and Joshua Johnstun and Joshua Saxe and Junteng Jia and Kalyan Vasuden Alwala and Karthik Prasad and Kartikeya Upasani and Kate Plawiak and Ke Li and Kenneth Heafield and Kevin Stone and Khalid El-Arini and Krithika Iyer and Kshitiz Malik and 
Kuenley Chiu and Kunal Bhalla and Kushal Lakhotia and Lauren Rantala-Yeary and Laurens van der Maaten and Lawrence Chen and Liang Tan and Liz Jenkins and Louis Martin and Lovish Madaan and Lubo Malo and Lukas Blecher and Lukas Landzaat and Luke de Oliveira and Madeline Muzzi and Mahesh Pasupuleti and Mannat Singh and Manohar Paluri and Marcin Kardas and Maria Tsimpoukelli and Mathew Oldham and Mathieu Rita and Maya Pavlova and Melanie Kambadur and Mike Lewis and Min Si and Mitesh Kumar Singh and Mona Hassan and Naman Goyal and Narjes Torabi and Nikolay Bashlykov and Nikolay Bogoychev and Niladri Chatterji and Ning Zhang and Olivier Duchenne and Onur Çelebi and Patrick Alrassy and Pengchuan Zhang and Pengwei Li and Petar Vasic and Peter Weng and Prajjwal Bhargava and Pratik Dubal and Praveen Krishnan and Punit Singh Koura and Puxin Xu and Qing He and Qingxiao Dong and Ragavan Srinivasan and Raj Ganapathy and Ramon Calderer and Ricardo Silveira Cabral and Robert Stojnic and Roberta Raileanu and Rohan Maheswari and Rohit Girdhar and Rohit Patel and Romain Sauvestre and Ronnie Polidoro and Roshan Sumbaly and Ross Taylor and Ruan Silva and Rui Hou and Rui Wang and Saghar Hosseini and Sahana Chennabasappa and Sanjay Singh and Sean Bell and Seohyun Sonia Kim and Sergey Edunov and Shaoliang Nie and Sharan Narang and Sharath Raparthy and Sheng Shen and Shengye Wan and Shruti Bhosale and Shun Zhang and Simon Vandenhende and Soumya Batra and Spencer Whitman and Sten Sootla and Stephane Collot and Suchin Gururangan and Sydney Borodinsky and Tamar Herman and Tara Fowler and Tarek Sheasha and Thomas Georgiou and Thomas Scialom and Tobias Speckbacher and Todor Mihaylov and Tong Xiao and Ujjwal Karn and Vedanuj Goswami and Vibhor Gupta and Vignesh Ramanathan and Viktor Kerkez and Vincent Gonguet and Virginie Do and Vish Vogeti and Vítor Albiero and Vladan Petrovic and Weiwei Chu and Wenhan Xiong and Wenyin Fu and Whitney Meers and Xavier Martinet and Xiaodong Wang and Xiaofang 
Wang and Xiaoqing Ellen Tan and Xide Xia and Xinfeng Xie and Xuchao Jia and Xuewei Wang and Yaelle Goldschlag and Yashesh Gaur and Yasmine Babaei and Yi Wen and Yiwen Song and Yuchen Zhang and Yue Li and Yuning Mao and Zacharie Delpierre Coudert and Zheng Yan and Zhengxing Chen and Zoe Papakipos and Aaditya Singh and Aayushi Srivastava and Abha Jain and Adam Kelsey and Adam Shajnfeld and Adithya Gangidi and Adolfo Victoria and Ahuva Goldstand and Ajay Menon and Ajay Sharma and Alex Boesenberg and Alexei Baevski and Allie Feinstein and Amanda Kallet and Amit Sangani and Amos Teo and Anam Yunus and Andrei Lupu and Andres Alvarado and Andrew Caples and Andrew Gu and Andrew Ho and Andrew Poulton and Andrew Ryan and Ankit Ramchandani and Annie Dong and Annie Franco and Anuj Goyal and Aparajita Saraf and Arkabandhu Chowdhury and Ashley Gabriel and Ashwin Bharambe and Assaf Eisenman and Azadeh Yazdan and Beau James and Ben Maurer and Benjamin Leonhardi and Bernie Huang and Beth Loyd and Beto De Paola and Bhargavi Paranjape and Bing Liu and Bo Wu and Boyu Ni and Braden Hancock and Bram Wasti and Brandon Spence and Brani Stojkovic and Brian Gamido and Britt Montalvo and Carl Parker and Carly Burton and Catalina Mejia and Ce Liu and Changhan Wang and Changkyu Kim and Chao Zhou and Chester Hu and Ching-Hsiang Chu and Chris Cai and Chris Tindal and Christoph Feichtenhofer and Cynthia Gao and Damon Civin and Dana Beaty and Daniel Kreymer and Daniel Li and David Adkins and David Xu and Davide Testuggine and Delia David and Devi Parikh and Diana Liskovich and Didem Foss and Dingkang Wang and Duc Le and Dustin Holland and Edward Dowling and Eissa Jamil and Elaine Montgomery and Eleonora Presani and Emily Hahn and Emily Wood and Eric-Tuan Le and Erik Brinkman and Esteban Arcaute and Evan Dunbar and Evan Smothers and Fei Sun and Felix Kreuk and Feng Tian and Filippos Kokkinos and Firat Ozgenel and Francesco Caggioni and Frank Kanayet and Frank Seide and Gabriela Medina Florez and 
Gabriella Schwarz and Gada Badeer and Georgia Swee and Gil Halpern and Grant Herman and Grigory Sizov and Guangyi and Zhang and Guna Lakshminarayanan and Hakan Inan and Hamid Shojanazeri and Han Zou and Hannah Wang and Hanwen Zha and Haroun Habeeb and Harrison Rudolph and Helen Suk and Henry Aspegren and Hunter Goldman and Hongyuan Zhan and Ibrahim Damlaj and Igor Molybog and Igor Tufanov and Ilias Leontiadis and Irina-Elena Veliche and Itai Gat and Jake Weissman and James Geboski and James Kohli and Janice Lam and Japhet Asher and Jean-Baptiste Gaya and Jeff Marcus and Jeff Tang and Jennifer Chan and Jenny Zhen and Jeremy Reizenstein and Jeremy Teboul and Jessica Zhong and Jian Jin and Jingyi Yang and Joe Cummings and Jon Carvill and Jon Shepard and Jonathan McPhie and Jonathan Torres and Josh Ginsburg and Junjie Wang and Kai Wu and Kam Hou U and Karan Saxena and Kartikay Khandelwal and Katayoun Zand and Kathy Matosich and Kaushik Veeraraghavan and Kelly Michelena and Keqian Li and Kiran Jagadeesh and Kun Huang and Kunal Chawla and Kyle Huang and Lailin Chen and Lakshya Garg and Lavender A and Leandro Silva and Lee Bell and Lei Zhang and Liangpeng Guo and Licheng Yu and Liron Moshkovich and Luca Wehrstedt and Madian Khabsa and Manav Avalani and Manish Bhatt and Martynas Mankus and Matan Hasson and Matthew Lennie and Matthias Reso and Maxim Groshev and Maxim Naumov and Maya Lathi and Meghan Keneally and Miao Liu and Michael L. 
Seltzer and Michal Valko and Michelle Restrepo and Mihir Patel and Mik Vyatskov and Mikayel Samvelyan and Mike Clark and Mike Macey and Mike Wang and Miquel Jubert Hermoso and Mo Metanat and Mohammad Rastegari and Munish Bansal and Nandhini Santhanam and Natascha Parks and Natasha White and Navyata Bawa and Nayan Singhal and Nick Egebo and Nicolas Usunier and Nikhil Mehta and Nikolay Pavlovich Laptev and Ning Dong and Norman Cheng and Oleg Chernoguz and Olivia Hart and Omkar Salpekar and Ozlem Kalinli and Parkin Kent and Parth Parekh and Paul Saab and Pavan Balaji and Pedro Rittner and Philip Bontrager and Pierre Roux and Piotr Dollar and Polina Zvyagina and Prashant Ratanchandani and Pritish Yuvraj and Qian Liang and Rachad Alao and Rachel Rodriguez and Rafi Ayub and Raghotham Murthy and Raghu Nayani and Rahul Mitra and Rangaprabhu Parthasarathy and Raymond Li and Rebekkah Hogan and Robin Battey and Rocky Wang and Russ Howes and Ruty Rinott and Sachin Mehta and Sachin Siby and Sai Jayesh Bondu and Samyak Datta and Sara Chugh and Sara Hunt and Sargun Dhillon and Sasha Sidorov and Satadru Pan and Saurabh Mahajan and Saurabh Verma and Seiji Yamamoto and Sharadh Ramaswamy and Shaun Lindsay and Shaun Lindsay and Sheng Feng and Shenghao Lin and Shengxin Cindy Zha and Shishir Patil and Shiva Shankar and Shuqiang Zhang and Shuqiang Zhang and Sinong Wang and Sneha Agarwal and Soji Sajuyigbe and Soumith Chintala and Stephanie Max and Stephen Chen and Steve Kehoe and Steve Satterfield and Sudarshan Govindaprasad and Sumit Gupta and Summer Deng and Sungmin Cho and Sunny Virk and Suraj Subramanian and Sy Choudhury and Sydney Goldman and Tal Remez and Tamar Glaser and Tamara Best and Thilo Koehler and Thomas Robinson and Tianhe Li and Tianjun Zhang and Tim Matthews and Timothy Chou and Tzook Shaked and Varun Vontimitta and Victoria Ajayi and Victoria Montanez and Vijai Mohan and Vinay Satish Kumar and Vishal Mangla and Vlad Ionescu and Vlad Poenaru and Vlad Tiberiu Mihailescu 
and Vladimir Ivanov and Wei Li and Wenchen Wang and Wenwen Jiang and Wes Bouaziz and Will Constable and Xiaocheng Tang and Xiaojian Wu and Xiaolan Wang and Xilun Wu and Xinbo Gao and Yaniv Kleinman and Yanjun Chen and Ye Hu and Ye Jia and Ye Qi and Yenda Li and Yilin Zhang and Ying Zhang and Yossi Adi and Youngjin Nam and Yu and Wang and Yu Zhao and Yuchen Hao and Yundi Qian and Yunlu Li and Yuzi He and Zach Rait and Zachary DeVito and Zef Rosnbrick and Zhaoduo Wen and Zhenyu Yang and Zhiwei Zhao and Zhiyu Ma},
356
+ year={2024},
357
+ eprint={2407.21783},
358
+ archivePrefix={arXiv},
359
+ primaryClass={cs.AI},
360
+ url={https://arxiv.org/abs/2407.21783},
361
+ }
362
+ @misc{deepseekai2024deepseekv3technicalreport,
363
+ title={DeepSeek-V3 Technical Report},
364
+ author={DeepSeek-AI and Aixin Liu and Bei Feng and Bing Xue and Bingxuan Wang and Bochao Wu and Chengda Lu and Chenggang Zhao and Chengqi Deng and Chenyu Zhang and Chong Ruan and Damai Dai and Daya Guo and Dejian Yang and Deli Chen and Dongjie Ji and Erhang Li and Fangyun Lin and Fucong Dai and Fuli Luo and Guangbo Hao and Guanting Chen and Guowei Li and H. Zhang and Han Bao and Hanwei Xu and Haocheng Wang and Haowei Zhang and Honghui Ding and Huajian Xin and Huazuo Gao and Hui Li and Hui Qu and J. L. Cai and Jian Liang and Jianzhong Guo and Jiaqi Ni and Jiashi Li and Jiawei Wang and Jin Chen and Jingchang Chen and Jingyang Yuan and Junjie Qiu and Junlong Li and Junxiao Song and Kai Dong and Kai Hu and Kaige Gao and Kang Guan and Kexin Huang and Kuai Yu and Lean Wang and Lecong Zhang and Lei Xu and Leyi Xia and Liang Zhao and Litong Wang and Liyue Zhang and Meng Li and Miaojun Wang and Mingchuan Zhang and Minghua Zhang and Minghui Tang and Mingming Li and Ning Tian and Panpan Huang and Peiyi Wang and Peng Zhang and Qiancheng Wang and Qihao Zhu and Qinyu Chen and Qiushi Du and R. J. Chen and R. L. Jin and Ruiqi Ge and Ruisong Zhang and Ruizhe Pan and Runji Wang and Runxin Xu and Ruoyu Zhang and Ruyi Chen and S. S. Li and Shanghao Lu and Shangyan Zhou and Shanhuang Chen and Shaoqing Wu and Shengfeng Ye and Shengfeng Ye and Shirong Ma and Shiyu Wang and Shuang Zhou and Shuiping Yu and Shunfeng Zhou and Shuting Pan and T. Wang and Tao Yun and Tian Pei and Tianyu Sun and W. L. Xiao and Wangding Zeng and Wanjia Zhao and Wei An and Wen Liu and Wenfeng Liang and Wenjun Gao and Wenqin Yu and Wentao Zhang and X. Q. 
Li and Xiangyue Jin and Xianzu Wang and Xiao Bi and Xiaodong Liu and Xiaohan Wang and Xiaojin Shen and Xiaokang Chen and Xiaokang Zhang and Xiaosha Chen and Xiaotao Nie and Xiaowen Sun and Xiaoxiang Wang and Xin Cheng and Xin Liu and Xin Xie and Xingchao Liu and Xingkai Yu and Xinnan Song and Xinxia Shan and Xinyi Zhou and Xinyu Yang and Xinyuan Li and Xuecheng Su and Xuheng Lin and Y. K. Li and Y. Q. Wang and Y. X. Wei and Y. X. Zhu and Yang Zhang and Yanhong Xu and Yanhong Xu and Yanping Huang and Yao Li and Yao Zhao and Yaofeng Sun and Yaohui Li and Yaohui Wang and Yi Yu and Yi Zheng and Yichao Zhang and Yifan Shi and Yiliang Xiong and Ying He and Ying Tang and Yishi Piao and Yisong Wang and Yixuan Tan and Yiyang Ma and Yiyuan Liu and Yongqiang Guo and Yu Wu and Yuan Ou and Yuchen Zhu and Yuduan Wang and Yue Gong and Yuheng Zou and Yujia He and Yukun Zha and Yunfan Xiong and Yunxian Ma and Yuting Yan and Yuxiang Luo and Yuxiang You and Yuxuan Liu and Yuyang Zhou and Z. F. Wu and Z. Z. Ren and Zehui Ren and Zhangli Sha and Zhe Fu and Zhean Xu and Zhen Huang and Zhen Zhang and Zhenda Xie and Zhengyan Zhang and Zhewen Hao and Zhibin Gou and Zhicheng Ma and Zhigang Yan and Zhihong Shao and Zhipeng Xu and Zhiyu Wu and Zhongyu Zhang and Zhuoshu Li and Zihui Gu and Zijia Zhu and Zijun Liu and Zilin Li and Ziwei Xie and Ziyang Song and Ziyi Gao and Zizheng Pan},
365
+ year={2024},
366
+ eprint={2412.19437},
367
+ archivePrefix={arXiv},
368
+ primaryClass={cs.CL},
369
+ url={https://arxiv.org/abs/2412.19437},
370
  }
dist/distill.bundle.js CHANGED
The diff for this file is too large to render. See raw diff
 
dist/distill.bundle.js.map CHANGED
The diff for this file is too large to render. See raw diff
 
dist/index.html CHANGED
@@ -7,82 +7,15 @@
7
  <meta name="viewport" content="width=device-width, initial-scale=1">
8
  <meta charset="utf8">
9
  <base target="_blank">
10
- <title>FineWeb: decanting the web for the finest text data at scale</title>
11
  <link rel="stylesheet" href="style.css">
12
- <style>
13
- #controls {
14
- display: grid;
15
- grid-template-columns: 350px 350px;
16
- gap: 1px;
17
- align-items: center;
18
- max-width: 700px;
19
- margin: 0 auto 20px;
20
- padding: 0 10px;
21
- }
22
-
23
- #controls .row {
24
- display: contents;
25
- }
26
-
27
- #controls .cell {
28
- padding: 1px;
29
- box-sizing: border-box;
30
- }
31
-
32
- #controls .column-1 {
33
- display: flex;
34
- align-items: center;
35
- justify-content: space-between;
36
- }
37
-
38
- #controls .column-2 {
39
- display: flex;
40
- align-items: center;
41
- justify-content: space-between;
42
- }
43
-
44
- #controls label {
45
- text-align: right;
46
- padding-right: 10px;
47
- flex: 0 0 auto;
48
- width: 150px;
49
- line-height: 1.5em;
50
- font-size: 0.8em;
51
- }
52
-
53
- #controls input[type="range"] {
54
- width: 50%;
55
- margin: 0 10px;
56
- }
57
-
58
- #controls input[type="number"] {
59
- width: 60px;
60
- height: 20px;
61
- }
62
-
63
- #controls select {
64
- width: 100%;
65
- }
66
-
67
- #controls .column {
68
- display: contents;
69
- }
70
-
71
- #graph svg {
72
- font-family: sans-serif;
73
- }
74
-
75
- #graph svg rect {
76
- cursor: pointer;
77
- }
78
- </style>
79
  </head>
80
 
81
  <body>
82
  <d-front-matter>
83
  <script id='distill-front-matter' type="text/json">{
84
- "title": "🔭 Ultra-Guide to Scaling LLM training",
85
- "description": "This blog covers everything about scaling LLMs in 2024.",
86
  "published": "Sept 28, 2024",
87
  "affiliation": {"name": "HuggingFace"},
88
  "authors": [
@@ -104,7 +37,7 @@
104
  </script>
105
  </d-front-matter>
106
  <d-title>
107
- <h1 class="l-page" style="text-align: center;">🔭 Ultra-Guide to Scaling LLM training</h1>
108
  <div id="title-plot" class="main-plot-container l-screen">
109
  <figure>
110
  <img src="assets/images/banner.png" alt="FineWeb">
@@ -118,1107 +51,168 @@
118
  <d-article>
119
  <d-contents>
120
  </d-contents>
 
 
121
 
122
- <p>The performance of a large language model (LLM) depends heavily on the quality and size of its pretraining dataset.
123
- However, the pretraining datasets for state-of-the-art open LLMs like Llama 3<d-cite
124
- bibtex-key="llama3modelcard"></d-cite> and Mixtral<d-cite bibtex-key="jiang2024mixtral"></d-cite> are
125
- not publicly available and very little is known about how they were created.</p>
126
  <aside>Reading time: 7 days. For the best reading experience, we recommend not using a mobile phone.</aside>
127
 
128
- <p>Recently, we released <a href="https://huggingface.co/datasets/HuggingFaceFW/fineweb"><strong>🍷
129
- FineWeb</strong></a>, a new, large-scale
130
- (<strong>15-trillion tokens, 44TB disk space</strong>) dataset for LLM pretraining. FineWeb is derived from
131
- 96 <a href="https://commoncrawl.org/">CommonCrawl</a> snapshots and produces <strong>better-performing LLMs
132
- than other open pretraining datasets</strong>.
133
 
134
  <aside>We are extremely thankful to the whole <a href="https://distill.pub/">distill.pub</a> team for creating
135
  the template on which we based this blog post.</aside>
 
 
 
 
136
 
137
- <div id="graph" style="position: relative; width: 700px; height: 500px;"></div>
138
  <div id="controls">
139
- <div class="row">
140
- <div class="cell column-1">
141
- <label for="a">Attention Heads (a):</label>
142
- <input type="range" id="a" name="a" min="1" max="128" value="8">
143
- <input type="number" id="a_input" value="8" min="1" max="128">
144
- </div>
145
- <div class="cell column-2">
146
- <label for="mixed">Mixed Precision:</label>
147
- <input type="checkbox" id="mixed" name="mixed" checked>
148
- <span></span> <!-- Empty span to maintain grid alignment -->
149
- </div>
150
  </div>
151
- <div class="row">
152
- <div class="cell column-1">
153
- <label for="b">Micro Batch Size (b):</label>
154
- <input type="range" id="b" name="b" min="1" max="53248" value="32">
155
- <input type="number" id="b_input" value="32" min="1" max="53248">
156
- </div>
157
- <div class="cell column-2">
158
- <label for="seq_parallel">Sequence Parallelism:</label>
159
- <input type="checkbox" id="seq_parallel" name="seq_parallel">
160
- <span></span> <!-- Empty span to maintain grid alignment -->
161
- </div>
162
  </div>
163
- <div class="row">
164
- <div class="cell column-1">
165
- <label for="h">Hidden Dimension (h):</label>
166
- <input type="range" id="h" name="h" min="1" max="16384" value="512">
167
- <input type="number" id="h_input" value="512" min="128" max="16384">
168
- </div>
169
- <div class="cell column-2">
170
- <label for="recomputation">Recomputation:</label>
171
- <select id="recomputation" name="recomputation">
172
- <option value="none">None</option>
173
- <option value="selective">Selective</option>
174
- <option value="full">Full</option>
175
- </select>
176
- <span></span> <!-- Empty span to maintain grid alignment -->
177
-
178
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
179
  </div>
180
- <div class="row">
181
- <div class="cell column-1">
182
- <label for="h_ff">Feedforward Dimension (h_ff):</label>
183
- <input type="range" id="h_ff" name="h_ff" min="1" max="65536" value="2048">
184
- <input type="number" id="h_ff_input" value="2048" min="512" max="65536">
185
- </div>
186
- <div class="cell column-2">
187
- <label for="zero">Zero:</label>
188
- <select id="zero" name="zero">
189
- <option value="0">0</option>
190
- <option value="1">1</option>
191
- <option value="2">2</option>
192
- <option value="3">3</option>
193
- </select>
194
- <span></span> <!-- Empty span to maintain grid alignment -->
195
- </div>
196
  </div>
197
- <div class="row">
198
- <div class="cell column-1">
199
- <label for="L">Number of Layers (L):</label>
200
- <input type="range" id="L" name="L" min="1" max="126" value="12">
201
- <input type="number" id="L_input" value="12" min="1" max="126">
202
- </div>
203
- <div class="cell column-2">
204
- <label for="ff_activation">FF Activation:</label>
205
- <select id="ff_activation" name="ff_activation">
206
- <option value="relu">ReLU</option>
207
- <option value="gelu">GELU</option>
208
- <option value="swiglu">SwiGLU</option>
209
- </select>
210
- <span></span> <!-- Empty span to maintain grid alignment -->
211
- </div>
212
  </div>
213
- <div class="row">
214
- <div class="cell column-1">
215
- <label for="s">Sequence Length (s):</label>
216
- <input type="range" id="s" name="s" min="1" max="128000" value="128">
217
- <input type="number" id="s_input" value="128" min="64" max="128000">
218
- </div>
219
- <div class="cell column-2">
220
- <label for="presets">Presets:</label>
221
- <select id="presets" name="presets">
222
- <option value="Llama 3 Tiny">Llama 3 Tiny</option>
223
- <option value="Llama 3 8B">Llama 3 8B</option>
224
- <option value="Llama 3 70B">Llama 3 70B</option>
225
- <option value="Llama 3 405B">Llama 3 405B</option>
226
- </select>
227
- <span></span> <!-- Empty span to maintain grid alignment -->
228
- </div>
229
  </div>
230
- <div class="row">
231
- <div class="cell column-1">
232
- <label for="v">Vocabulary Size (v):</label>
233
- <input type="range" id="v" name="v" min="1000" max="100000" value="30522">
234
- <input type="number" id="v_input" value="30522" min="1000" max="100000">
235
- </div>
236
- <div class="cell column-2">
237
- <label for="tp">Tensor Parallelism (t):</label>
238
- <input type="range" id="tp" name="tp" min="1" max="16" value="8">
239
- <input type="number" id="tp_input" value="8" min="1" max="16">
240
- </div>
241
  </div>
242
- <div class="row">
243
- <div class="cell column-1">
244
- <label for="k">Optimizer Parameters (k):</label>
245
- <input type="range" id="k" name="k" min="1" max="16" value="8">
246
- <input type="number" id="k_input" value="8" min="1" max="16">
247
- </div>
248
- <div class="cell column-2">
249
- <label for="dp">Data Parallelism (d):</label>
250
- <input type="range" id="dp" name="dp" min="1" max="256" value="1">
251
- <input type="number" id="dp_input" value="1" min="1" max="256">
252
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
  </div>
254
  </div>
255
 
256
- <p><strong>TLDR:</strong> This blog covers a discussion on processing and evaluating data quality at scale, the
257
- 🍷 FineWeb
258
- recipe (listing and explaining all of our design choices), and the process followed to create its 📚
259
- FineWeb-Edu subset.</p>
260
-
261
- <h2>Scaling Models and Hardware</h2>
262
-
263
- <p>Now that we know the basics of distributed communication and computations it's time to apply this to training
264
- LLMs at scale. Here's the plan of action: we'll go through increasingly complex distribution strategies,
265
- namely data, then tensor and finally pipeline parallelism, and show three things:</p>
266
-
267
- <ol>
268
- <li>conceptual explanations with diagrams</li>
269
- <li>a minimal coding example illustrating how to implement said strategy</li>
270
- <li>scaling experiments showcasing the strengths and limits of the method with real data</li>
271
- </ol>
272
-
273
- <p>For the experiments we scale across two dimensions: we make the models larger and larger and add more and
274
- more compute nodes and measure how throughput changes.</p>
275
-
276
- <p>So this is a good point to get ☕ #2 and we'll have a look at the setup for the practical experiments.</p>
277
-
278
- <h2>Experiment setup</h2>
279
-
280
- <table>
281
- <thead>
282
- <tr>
283
- <th></th>
284
- <th><strong>1B (1)</strong></th>
285
- <th><strong>7B</strong></th>
286
- <th><strong>70B</strong></th>
287
- <th><strong>340B (2)</strong></th>
288
- <th><strong>400B (3)</strong></th>
289
- </tr>
290
- </thead>
291
- <tbody>
292
- <tr>
293
- <td><strong>N Layers</strong></td>
294
- <td>24</td>
295
- <td>32</td>
296
- <td>80</td>
297
- <td>96</td>
298
- <td>126</td>
299
- </tr>
300
- <tr>
301
- <td><strong>N Heads</strong></td>
302
- <td>32</td>
303
- <td>32</td>
304
- <td>64</td>
305
- <td>96</td>
306
- <td>128</td>
307
- </tr>
308
- <tr>
309
- <td><strong>Dimension</strong></td>
310
- <td>2048</td>
311
- <td>4096</td>
312
- <td>8192</td>
313
- <td>18432</td>
314
- <td>16384</td>
315
- </tr>
316
- </tbody>
317
- </table>
318
-
319
- <p>(1) FineWeb ablation models</p>
320
- <p>(2) Nemotron-340B architecture (without GQA)</p>
321
- <p>(3) Llama-400B, ffn dim = 1.2 hidden dim (without GQA)</p>
322
-
323
 
324
- <h2>Distribution Methods</h2>
325
 
326
- <p>Efficiently training LLMs now requires amounts of compute which in most cases exceed a single GPU or machine.
327
- Large distributed clusters are thus used to train these models and can range from hundreds to thousands of
328
- nodes each usually equipped with up to 8 GPUs. To make the best use of such expensive hardware, a range
329
- of distributed training methods have been developed with the goal of ensuring that GPUs are highly utilized
330
- at all times and not waiting for data/synchronization/etc.</p>
331
 
332
- <p>Several methods can be used to distribute training and we'll start with 4D parallelism followed-up by
333
- DeepSpeed stages. While we explain these strategies we'll also run experiments to determine the trade-offs
334
- and understand the optimal settings.</p>
335
- <p>The name "4D parallelism" originates from the fact that it involves combining up to 4 distribution methods:
336
- data, tensor, pipeline, and sequence parallelism (each of these techniques can be used independently of the
337
- other). You may thus ask "So which one should I use?".</p>
338
 
339
- <p>Unfortunately, there is no universal answer as the response will actually depend on the cluster setup as well
340
- as the model architecture. But do not despair for in this section we'll develop strategies to figure out the
341
- best setting experimentally!</p>
342
 
343
- <p>In addition to 4D parallelism we'll also take a look at "DeepSpeed", a method developed by Microsoft which is
344
- generally complementary to 4D parallelism and can be leveraged on top of it.</p>
345
 
346
- <p><strong>Idea: show two things in every section</strong></p>
 
 
 
347
  <ol>
348
- <li>a small toy model (e.g. 4 layer FFN) we can interactively show with every approach</li>
349
- <li>a benchmark showing the improvement/limits of the approach (e.g. when you cross 1 node with TP)</li>
350
- </ol>
351
-
352
- <h3>No Parallelism</h3>
353
-
354
- <p>Let's quickly go over the basics before going into distributed training. When a model is trained on a single
355
- GPU, the training consists of 3 steps in the simplest case:</p>
356
- <ol>
357
- <li>one forward pass,</li>
358
- <li>one backward pass to compute the gradients, and</li>
359
- <li>an optimization step using the gradients to update the parameters</li>
360
- </ol>
361
-
362
- <p>As we'll see in the future, these steps may be repeated or intertwined but for now we'll start simple:</p>
363
- <p>As we'll see in the future, these steps may be repeated or intertwined but for now we'll start simple:</p>
364
-
365
- <img src="assets/images/IMG_7537D08D7F41-1.jpeg" alt="Training Steps">
366
-
367
- <p>In this figure the successive blue boxes on the top line can be seen as successive layers inside a model
368
- (same for the last line). The red boxes are the associated gradients for each of these layers.</p>
369
-
370
- <p>The batch size (<em>bs</em>) is one of the most important hyper-parameters in machine learning, affecting
371
- both model convergence and throughput.</p>
372
-
373
- <p>If the batch size is too small, gradients will tend to be noisy and the model may not be able to converge to
374
- optimal performances while a batch size too large can make the convergence of the model slower and waste
375
- compute. You can find a nice discussion of this topic in OpenAI's paper on large batch training (<a
376
- href="https://arxiv.org/abs/1812.06162">https://arxiv.org/pdf/1812.06162</a>).</p>
377
-
378
- <p>The batch size also affects the throughput: a small batch size will require more optimizer steps to train on
379
- a given amount of samples. Optimizer steps are costly (in compute time) and the throughput will thus be
380
- lower than when using a larger batch size. On the other hand, larger batches, while leading to higher
381
- throughput may suffer from slow convergence in the limits as we've just seen. There is generally an optimal
382
- batch size from a convergence/performance point of view (note that the batch size can usually still be
383
- changed around the optimal batch size without major impact to the performance of the model).</p>
384
-
385
- <p>Note that in the LLM community, batch sizes are commonly reported in terms of tokens instead of number of
386
- samples (BST - Batch Size Tokens) as each token has a label and thus a loss term and can thus be considered
387
- individual (although highly correlated) samples.</p>
388
-
389
- <p>A sweet spot for LLM training is usually on the order of 4-20 million tokens per batch (links GPT-3,
390
- DeepSeek, Llama). In the simplest case, training on a single machine, the <em>BS</em> and <em>BST</em> can
391
- be computed from the model input sequence length as follows:</p>
392
-
393
- <d-math>
394
- bst = bs * seq
395
- </d-math>
396
-
397
- <p>(note that from here on forward we'll show the formulas for the batch size in number of samples but you can
398
- always get its token-unit counterpart by multiplying it with the sequence length)</p>
399
-
400
- <p>And we're now hitting our first scaling problem:</p>
401
-
402
- <blockquote>
403
- <p>what if we can't fit the model into GPU memory even with <code>BS=1</code>?</p>
404
- </blockquote>
405
-
406
- <p>Good question, reader!</p>
407
-
408
- <p>Let's start by understanding what led to our out-of-memory issue in the first place.</p>
409
-
410
- <h2>A brief overview of memory usage in Transformers</h2>
411
-
412
- <p>To train a neural network model, one needs to store many elements in memory besides the weights themselves.
413
- Generally, the memory usage is made up from the following elements:</p>
414
- <ul>
415
- <li>model weights</li>
416
- <li>model gradients</li>
417
- <li>optimizer states</li>
418
- <li>activations computed during the forward pass and which are needed to compute the backward pass</li>
419
- <li>also CUDA Kernels require 1-2GB of GPU memory which you can quickly check yourself by running
420
- <code>import torch; torch.ones((1, 1)).to("cuda")</code> and then checking the GPU memory with
421
- <code>nvidia-smi</code>
422
- </li>
423
- <li>some remaining memory usage from buffers, intermediate results and some memory that can't be used due to
424
- fragmentation</li>
425
  </ul>
426
-
427
- <p>Scaling up training is usually a question of playing with those constituents to keep memory low while not
428
- impacting performance too much. We'll neglect the last two contributors as there's usually not that much you
429
- can do about them unless you dive deep in the code.</p>
430
-
431
- <p>For the rest, they are usually different types of tensors that can have various sizes (usually multiples of
432
- one or several of batch size, sequence length, model hidden dimension and some potential sharding) and
433
- various precisions (with optimizer states and weights copy being often kept in full FP32 precision while
434
- activations can be of lower precision like BF16 or FP8). Let's try to get some intuition for the memory
435
- requirement of these various elements.</p>
436
-
437
- <p>Let's first look at the weights, gradients and optimizer states. They are all dependent on the number of
438
- parameters in a model. For a simple LLM the number of parameters is given by the following formula:</p>
439
-
440
- <d-math>
441
- N = h*v + L * (12 * h^2 + 13*h) + 2*h
442
- </d-math>
443
-
444
- <p>In that equation, <em>h</em> corresponds to the hidden dimension, <em>v</em> to the vocabulary size, and
445
- <em>L</em> the number of layers in the model. Note that looking at the equation we can see that the term
446
- that will dominate at large model scales is the one with <em>h^2</em> since it's the only term growing
447
- quadratically as we scale the models.
448
- </p>
449
-
450
- <p>Let's see how the number of parameters translates to memory usage. The memory requirements for the parameters
451
- and gradients are the number of parameters multiplied by the number of bytes per parameter. Mixed precision
452
- training with BF16 is the default nowadays which requires 2 bytes per parameter. In addition, there are a
453
- number of values necessary for the optimizer states: for ADAM it requires the momentum and the variance in
454
- FP32, each using 4 bytes, and an additional copy of the model weights in FP32, thus 12 bytes per parameter
455
- (ref: <a href="https://arxiv.org/pdf/1910.02054">ZeRO</a>):</p>
456
-
457
- <d-math>
458
- m_{params} = 2 * N
459
- m_{grad} = 2 * N
460
- m_{opt} = (4+4+4) * N
461
- </d-math>
462
-
463
- <p>In old-fashioned full precision training both parameters and gradients would require 4 bytes each but the
464
- optimizer on the other hand wouldn't need to store an extra full precision copy of the weights:</p>
465
-
466
- <d-math>
467
- m_{params} = 4 * N
468
- m_{grad} = 4 * N
469
- m_{opt} = (4+4) * N
470
- </d-math>
471
-
472
- <p>So we can easily see that mixed precision itself doesn't save memory as it just distributes the memory
473
- differently across the three components. So by multiplying the number of parameters by 16 (=2+2+12) you can
474
- quickly get a sense of how much GPU memory we need for a model:</p>
475
-
478
-
479
- <table>
480
- <thead>
481
- <tr>
482
- <th>Model parameters</th>
483
- <th>Memory requirements</th>
484
- </tr>
485
- </thead>
486
- <tbody>
487
- <tr>
488
- <td>1B</td>
489
- <td>16 GB</td>
490
- </tr>
491
- <tr>
492
- <td>7B</td>
493
- <td>112 GB</td>
494
- </tr>
495
- <tr>
496
- <td>70B</td>
497
- <td>1120 GB</td>
498
- </tr>
499
- <tr>
500
- <td>405B</td>
501
- <td>6480 GB</td>
502
- </tr>
503
- </tbody>
504
- </table>
505
-
506
- <p>We can further decrease the memory usage if we choose FP8 training instead of BF16 but it is much less stable
507
- and a very active research topic (see <a href="https://x.com/xariusrke/status/1826669126955278401">here</a>)
508
- thus we won't go in details here.</p>
509
-
510
- <p>But we are not done yet, we'll also need to store the forward pass activations which are used during the
511
- backward pass to compute the gradients. The total memory required for the activations in mixed precision
512
- (which contributes the leading factor of 2 below) is given by the following equation:</p>
513
-
514
- <d-math>
515
- m_{act} = 2 * L* seq * bs * h * (34 + \frac{5*n_{heads}*seq}{h})
516
- </d-math>
517
-
518
- <p>You can follow <a href="https://arxiv.org/pdf/2205.05198">this NVIDIA paper</a> for a complete derivation, it
519
- essentially requires you to do some accounting of all the sizes of intermediate activations between each
520
- operation. What's interesting here is that the memory is not static for a given model but depends critically
521
- on the sequence length. We can use the memory formulas and have a look how the memory usage changes for a
522
- model for various sequence lengths:</p>
523
-
524
- <img src="assets/images/image%206.png" alt="Memory Usage Graph 1">
525
- <img src="assets/images/image%207.png" alt="Memory Usage Graph 2">
526
-
527
- <p>This graph tells a striking story: for short sequences, activations are almost negligible, but starting at
528
- around 2-4k tokens they start to take up a significant amount of memory while parameter, gradient and
529
- optimizer state are roughly independent of the sequence length and batch size. For large batch/sequence,
530
- activations however become by far the largest memory burden.</p>
531
-
532
- <p>Is there a way to tame this "activation explosion"?</p>
533
-
534
- <p>Good question, reader! I see you're following well and you're lucky as the answer is "Yes"! Let's talk about
535
- a technique called <strong>gradient checkpointing</strong> or more frequently <strong>activation
536
- recomputation</strong> which can help us cap activation memory footprint and is an essential tool in
537
- today's large model training toolbox.</p>
538
-
539
- <h3>Activation recomputation</h3>
540
-
541
- <p>The general idea behind gradient checkpointing is to discard some activations to save memory if we are
542
- willing to spend some extra compute to recompute them when needed. Typically we will save activations at
543
- some key points in memory and discard the rest and recompute them during the backward pass from the nearest
544
- activations:</p>
545
-
546
- <img src="assets/images/IMG_C4260C5C58DC-1.jpeg" alt="Activation Recompute">
547
-
548
- <p>We can select these key activations according to several strategies and modern frameworks usually choose
549
- among the following three strategies:</p>
550
- <ul>
551
- <li><strong>None</strong>: We don't recompute activations during the backward pass and keep all activations
552
- in memory. While this is the fastest and thus computationally cheapest option, it also requires the most
553
- memory.</li>
554
- <li><strong>Full</strong>: The simplest strategy from a conceptual point of view is to checkpoint
555
- activations between each Transformer layer. This is usually called the <code>full</code> strategy since
556
- it requires a forward pass through each layer essentially adding a full forward pass during the backward
557
- pass. This strategy saves the most memory but is the most expensive one in terms of compute. This
558
- increases the compute cost by up to 30-40% which is very noticeable.</li>
559
- <li><strong>Selective</strong>: In general we can do better than full. The authors of <a
560
- href="https://arxiv.org/pdf/2205.05198">this paper</a> did a detailed analysis studying which
561
- activations grow the largest and have the cheapest recomputation cost in terms of FLOPs. Turns out that
562
- the attention computations fall in that category, and thus we can usually discard them and focus on
563
- checkpointing expensive feedforward computations. Note: for a GPT-3 (175B) model this means 70%
564
- activation memory reduction at a 2.7% compute cost.</li>
565
  </ul>
566
-
567
- <p>Let's see how recomputation strategies can drastically reduce the memory footprint while selective
568
- recomputation strikes a nice balance between memory saving and recomputation cost:</p>
569
-
571
-
572
- <img src="assets/images/image%208.png" alt="Recomputation Strategies">
573
-
574
- <p>Note: Hardware vs Model flops.</p>
575
-
576
- <p>Most frameworks these days use FlashAttention (TODO: see later) which makes the attention computation less
577
- memory intensive through kernel fusion, thus most trainings use the <code>full</code> settings.</p>
578
-
579
- <p>We can save some GPU memory with activation recomputation but this only delays by a bit the next bottleneck:
580
- as hinted earlier for LLM training there is usually a sweet spot for the GBST and we need to work out the
581
- training configuration backward from there. However, you can't choose MBS to be an arbitrary large number on
582
- your GPU; at some point you will run out of GPU memory again since you need to store at least some of the
583
- activations in memory.</p>
584
-
585
- <p>There is a useful trick to compensate for that: <strong>gradient accumulation</strong> (<em>GradAcc</em>).
586
- With gradient accumulation we will split our batch in micro-batch, do forward and backward passes repeatedly
587
- on each micro-batch, compute the gradients, and, as the name suggests, sum the gradients step by step before
588
- doing a final optimizer step.</p>
589
-
590
- <p>We call the <code>micro batch size</code> (MBS) the batch size for each forward pass on a single node (the
591
- number of samples flowing through the model in one forward pass). We'll refer to the overall batch size
592
- between each optimizer step as the <code>global batch size</code> (GBS). If we do one optimizer step each 8
593
- forward/backward pass, the <code>global batch size</code> will be 8 times the <code>micro batch size</code>.
594
- </p>
595
-
596
- <p>What we now call <code>global batch size</code> thus corresponds to what we've called up to now just
597
- <code>batch size</code> for simplicity (we now make the terms more precise to avoid ambiguity).
598
- </p>
599
-
600
- <p>With gradient accumulation the global batch size can be computed as follows:</p>
601
-
602
- <d-math>
603
- BS = GBS = MBS * GradAcc
604
- </d-math>
605
-
606
- <p>Gradient accumulation allows us to effectively increase our batch size up to infinity (!) while the memory
607
- footprint stays constant. Gradient accumulation is also compatible with activation recomputation for further
608
- memory reduction. One drawback however, is that gradient accumulation requires multiple consecutive
609
- forward/backward passes per optimization step thereby increasing the compute overhead and slowing down
610
- training. No free lunch!</p>
611
-
612
- <img src="assets/images/IMG_DA188FF29F45-1.jpeg" alt="Gradient Accumulation">
613
-
614
- <p>This is actually a bummer since the forward/backward passes for each micro-batch could actually totally be
615
- run in parallel. They are independent from each other and the only changing parameter are the input samples.
616
- </p>
617
-
618
- <p>Here comes data parallelism to solve exactly this problem! Let's take a look, you say? Okay sure!</p>
619
-
620
- <h3>Data Parallelism</h3>
621
-
622
- <p>The idea behind data parallelism (DP) is to parallelize forward and backward passes across GPUs, passing
623
- different batches of data per GPU (or groups of GPUs) to the same model instance. Just like for gradient
624
- accumulation, we need to average gradients across instances before we do the optimization step. The GBS
625
- equation can then be extended to:</p>
626
-
627
- <d-math>
628
- GBS=MBS * GradAcc * DP
629
- </d-math>
630
-
631
- <p>This means that we can reduce the number of gradient accumulation steps in favor of data parallel processes
632
- which speeds up training. In practice, people will tend to max out the number of data parallel nodes (the DP
633
- above) as much as possible as it's inherently parallel versus the sequential Gradient Accumulation. Gradient
634
- accumulation is then added only to achieve a target batch size if DP alone is not sufficient. One exception
635
- to that is pipeline parallelism which we'll discuss later.</p>
636
-
637
- <img src="assets/images/IMG_A95961668B3F-1.jpeg" alt="Data Parallelism">
638
-
639
- <p>As you can see on the figure above, some gradients can already be gathered and summed (red boxes) even before
640
- gradients down the line (red boxes on the left of the current gradient) are still being computed. This
641
- significantly speeds up data parallelism. For instance, as soon as the backward pass of the last layer is
642
- done (last boxes on the right) those gradients can already be gathered/summed while the backward pass
643
- computations move to earlier layers, aka to the left. This lowers the communication/bandwidth pressure to
644
- sync gradients of the full model as it can be performed in part in parallel to the computation of said
645
- gradients. See <a href="https://siboehm.com/articles/22/data-parallel-training">this article</a> for more
646
- information.</p>
647
-
648
- <p>A general recipe to determine an optimal data-parallel setup can be as follows:</p>
649
- <ol>
650
- <li>Determine the best (global) batch size in tokens to use either by consulting literature or running
651
- experiments. This determines the GBST.</li>
652
- <li>Select a sequence length for training, again by either consulting literature or running experiments.
653
- Generally 2-8k tokens works reliably well.</li>
654
- <li>You now know the batch size (GBS=GBST/SeqLen). Find the maximum MBS on a single GPU by increasing the
655
- local batch size until you run out of memory. This determines the MBS.</li>
656
- <li>Finally, the number of available GPUs corresponds to the potential DP. The ratio of GBS to DP × MBS determines
657
- the remaining number of gradient accumulation steps needed for the desired GBS.</li>
658
- </ol>
659
-
660
- <p>If the gradient accumulation ratio is lower than one, i.e. you have too many GPUs (!), you can either choose
661
- to not use all your GPUs or test if a lower MBS will speed up training. In these cases, you may want to
662
- prioritize throughput over the individual GPU utilization, you can then choose DP first and use a smaller
663
- MBS than possible in order to speed up training.</p>
664
-
665
- <p>Time to take a concrete example: We want to train a model with a GBS of 4M tokens and a sequence length of
666
- 4k. This means our batch size will be 1024 samples (we pick powers of two). We observe that a single of our
667
- GPU can fit MBS=2 in memory and we have 128 GPUs available for training. This means with 4 gradient
668
- accumulation steps we'll achieve our goal of 1024 samples or 4M tokens per training step. Now what if we
669
- suddenly have 1024 GPUs available? We can achieve the same GBS and thus identical training by setting both
670
- MBS and gradient accumulation to 1 speeding up training significantly.</p>
671
-
672
- <p>[EXPERIMENTS WHERE WE INCREASE DP AND SHOW THROUGHPUT FOR SEVERAL MODELS]</p>
673
-
674
- <p>We've explored data parallelism, a simple strategy to scale training across more GPUs and gives consistent
675
- speed improvements. The keen reader might have noticed however that it rests on the assumption that we can
676
- fit at least one input sample forward pass (<em>MBS=1</em>) into our GPU memory. This is not always the
677
- case! In particular for larger models which often don't fit into a single GPU anymore even with activation
678
- recomputations activated.</p>
679
-
680
- <p>In such case, we need to shard the model across devices! We'll now study two complementary sharding methods,
681
- tensor and pipeline parallelism which are doing that. Let's start by the simplest, tensor parallelism!</p>
682
-
683
- <h3>Tensor Parallelism</h3>
684
-
685
- <p>So you've exhausted all the previous textbook tricks to try to fit your model on a single GPU but it still
686
- doesn't fit? Let's try to distribute this model across several GPUs. Unlike DP we will not simply duplicate
687
- the model but various parts of the model instance will be living on various GPUs.</p>
688
-
689
- <p>If we take a look at a typical matrix multiplication (the core of a neural network), we can get an idea about
690
- how we could split the model:</p>
691
-
692
- <img src="assets/images/image%209.png" alt="Matrix Multiplication Example">
693
-
694
- <p>Tensor parallelism is a technique in which a tensor is split into N shards along a particular dimension
695
- across N GPUs. Matrices can be split either on the column part or row part leading to row and column
696
- parallelism. Depending on which splitting strategy we choose will require different communications
697
- primitives.</p>
698
-
699
- <p><strong>Column linear:</strong></p>
700
- <ul>
701
- <li>Splitting by column or row involves different synchronization primitives:
702
- <ul>
703
- <li>column:
704
- <ul>
705
- <li>A <strong>Broadcast</strong> operation is used to send the same input to different GPUs,
706
- </li>
707
- <li>Multiplications are done independently on the GPUs, and finally</li>
708
- <li>An <strong>All-gather</strong> operation is used to gather the output results.</li>
709
- </ul>
710
- </li>
711
- <li>Row:
712
- <ul>
713
- <li>A <strong>Scatter</strong> operation is used to split the input and send it to different
714
- GPUs (we split the weight row-wise),</li>
715
- <li>Multiplications are done independently on the GPUs, and finally</li>
716
- <li>An <strong>All-reduce</strong> operation is used to add the results together and the
717
- full output results.</li>
718
- </ul>
719
- </li>
720
- </ul>
721
- </li>
722
- </ul>
723
-
724
- <p>This was for an example matrix multiplication. How do we apply this in practice to a real model? In the
725
- Transformer, there are 2 basic building blocks where tensor parallel can be applied:</p>
726
- <ul>
727
- <li>Feedforward layers (MLP)</li>
728
- <li>Multi-Head Attention (MHA)</li>
729
- </ul>
730
-
731
- <p>Feedforward layers comprise 2 successive MLPs with a non-linearity in-between. Here is the first part of it:
732
- </p>
733
-
734
- <img src="assets/images/image%2012.png" alt="Feedforward Layers">
735
-
736
- <p>Should we use row or column parallelization for the first MLP?</p>
737
-
738
- <p>Well it turns out parallelized GeLU only works in Column schema:</p>
739
-
740
- <p>In column schema:</p>
741
- <d-math>
742
- GeLU(cat([XW1, XW2])) = cat([GeLU(XW1), GeLU(XW2)])
743
- </d-math>
744
-
745
- <p>In row schema:</p>
746
- <d-math>
747
- GeLU(X_1 W_1 + X_2 W_2) \neq GeLU(X_1 W_1) + GeLU(X_2 W_2)
748
- </d-math>
749
-
750
- <p>If you'd rather see this in code, note that we can prove it with the following snippet as well:</p>
751
-
752
- <d-code block language="python">
753
-
755
- def example_gelu():
756
- from torch.nn.functional import gelu
757
-
758
- X = torch.randn(4, 2, device="cuda", dtype=torch.float32)
759
- W = torch.randn(2, 2, device="cuda", dtype=torch.float32)
760
-
761
- W_0, W_1 = W.chunk(2, dim=1)
762
-
763
- # Column linear
764
- y_col_1 = torch.cat([gelu(X @ W_0), gelu(X @ W_1)], dim=1)
765
- y_col_2 = gelu(torch.cat([X @ W_0, X @ W_1], dim=1))
766
-
767
- # All match
768
- torch.testing.assert_close(y_col_1, y_col_2, rtol=1e-5, atol=1e-5)
769
-
770
- # Row linear
771
- X_0, X_1 = X.chunk(2, dim=1)
772
- W_0, W_1 = W.chunk(2, dim=0)
773
- y_row_1 = gelu(X_0 @ W_0) + gelu(X_1 @ W_1)
774
- y_row_2 = gelu(X_0 @ W_0 + X_1 @ W_1)
775
-
776
- # Mismatch
777
- torch.testing.assert_close(y_row_1, y_row_2, rtol=1e-5, atol=1e-5)
778
- </d-code>
779
-
780
- <p>To avoid a synchronization step directly after the first MLP, we'll thus start with Column Parallel and be
781
- able to directly perform parallel GELU.</p>
782
-
783
- <p>Now, what about the second MLP? Should it be column or row parallel? Let's draft both options:</p>
784
- <ul>
785
- <li>Column Parallel followed by Column Parallel</li>
786
- <img src="assets/images/image%2013.png" alt="Column Parallel Schema 1">
787
- <li>Column Parallel followed by Row Parallel</li>
788
- <img src="assets/images/image%2014.png" alt="Column Parallel Schema 2">
789
  </ul>
790
-
791
- <p>We see that the "Column Parallel followed by Row Parallel" schema only involves two communications instead of
792
- four. It's thus the most efficient schema in terms of communications.</p>
793
-
794
- <p>Let's take a quick look at the backward pass:</p>
795
- <img src="assets/images/image%2015.png" alt="Backward Pass 1">
796
- <img src="assets/images/image%2016.png" alt="Backward Pass 2">
797
-
798
- <d-code block language="python">
799
- def column_linear_forward(X, local_W, group):
800
- Y_local = X @ local_W.t()
801
- return Y_local
802
-
803
- def column_linear_backward(local_grad_Y, X, local_W, group):
804
- local_grad_X = local_grad_Y @ local_W
805
- grad_W = local_grad_Y.t() @ X
806
- return local_grad_X, grad_W
807
-
808
- def row_linear_forward(local_X, local_W, group):
809
- Y_local = local_X @ local_W.t()
810
- dist.all_reduce(Y_local, group=group)
811
- Y = Y_local
812
- return Y
813
-
814
- def row_linear_backward(grad_Y, X, local_W, group):
815
- local_grad_X = grad_Y @ local_W
816
- grad_W = grad_Y.t() @ X
817
- return local_grad_X, grad_W
818
-
819
- def example_column_row_linear():
820
- # torchrun --nproc_per_node=2 tp_all_reduce.py
821
- group = dist.distributed_c10d._get_default_group()
822
-
823
- X_ref = torch.arange(4 * 2, device="cuda", dtype=torch.float32, requires_grad=True).reshape(4, 2)
824
- W_ref_layer1 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2) * 10
825
- W_ref_layer2 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2)
826
-
827
- X_ref.retain_grad()
828
- W_ref_layer1.retain_grad()
829
- W_ref_layer2.retain_grad()
830
-
831
- dist.broadcast(X_ref, src=0, group=group)
832
- dist.broadcast(W_ref_layer1, src=0, group=group)
833
- dist.broadcast(W_ref_layer2, src=0, group=group)
834
-
835
- X = X_ref.clone()
836
- W_layer1 = W_ref_layer1.clone()
837
- W_layer2 = W_ref_layer2.clone()
838
-
839
- # Forward
840
- Y_ref_linear1 = X_ref @ W_ref_layer1.t()
841
- Y_ref_linear1.retain_grad()
842
-
843
- # We will transpose for matrix multiplication. As a result, we need to split row-wise
844
- Y_local_linear1 = column_linear_forward(X, split_tensor(W_layer1, dim=0), group)
845
-
846
- torch.testing.assert_close(Y_local_linear1, split_tensor(Y_ref_linear1, dim=1), rtol=1e-5, atol=1e-5)
847
-
848
- Y_local_linear2 = row_linear_forward(Y_local_linear1, split_tensor(W_ref_layer2, dim=1), group)
849
- Y_ref_linear2 = Y_ref_linear1 @ W_ref_layer2.t()
850
- torch.testing.assert_close(Y_local_linear2, Y_ref_linear2, rtol=1e-5, atol=1e-5)
851
-
852
- # Backward
853
- Y_ref_linear2.sum().backward()
854
-
855
- grad_Y = torch.ones_like(Y_ref_linear2)
856
- grad_X_linear2, grad_W_linear2 = row_linear_backward(grad_Y, Y_local_linear1, split_tensor(W_layer2, dim=1),
857
- group)
858
-
859
- torch.testing.assert_close(grad_X_linear2, split_tensor(Y_ref_linear1.grad, dim=1), rtol=1e-5, atol=1e-5)
860
- torch.testing.assert_close(grad_W_linear2, split_tensor(W_ref_layer2.grad, dim=1), rtol=1e-5, atol=1e-5)
861
-
862
- grad_X, grad_W = column_linear_backward(grad_X_linear2, X, split_tensor(W_layer1, dim=0), group)
863
-
864
- torch.testing.assert_close(grad_X, X_ref.grad, rtol=1e-5, atol=1e-5)
865
- torch.testing.assert_close(grad_W, split_tensor(W_ref_layer1.grad, dim=0), rtol=1e-5, atol=1e-5)
866
-
867
- if __name__ == "__main__":
868
- dist.init_process_group("nccl", rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
869
- torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
870
-
871
- example_column_row_linear()
872
- </d-code>
873
-
874
- <p>Now that we've found the most efficient schema for the Feedforward part of the transformer, let's take a look
875
- at the multi-head attention block (MHA).</p>
876
-
877
- <p>We can generally follow a similar approach where the Q, K, V will be split in a Column Parallel fashion and
878
- the output projection will be split along the Row dimension.</p>
879
-
880
- <img src="assets/images/image%2017.png" alt="Multi-Head Attention Block">
881
-
882
- <p>To dive in further particularities, a nice reference paper detailing TP is for instance <a
883
- href="https://arxiv.org/abs/1909.08053">Megatron-LM: Training Multi-Billion Parameter Language Models
884
- Using Model Parallelism</a>.</p>
885
-
886
- <p>Note: Sequence Parallel</p>
887
-
888
- <h3>Sequence Parallelism</h3>
889
-
890
- <p>Tensor parallelism has been a great help to parallelize some of our computation on several GPU nodes with the
891
- limited cost of a few communication operations.</p>
892
-
893
- <p>It also had the additional benefit of reducing memory usage by splitting intermediate activations inside the
894
- feedforward elements across GPUs and thereby reducing the activations to store on each node.</p>
895
-
896
- <p>Could we push this approach further?</p>
897
-
898
- <p>Sequence parallelism applies this same idea to other parts of our model. We've applied tensor parallelism to
899
- the two main parts of our models where the structure of the MLP allowed us to naturally split the weights along a major
900
- axis.</p>
901
-
902
- <p>The rest of the model mostly comprises layer norms, dropout and various summation of residuals, these
903
- contribute little to the computation but come with rather large forward activations to store.</p>
904
-
905
- <p>[Add some illustration of the forward activations to store for each part]</p>
906
-
907
- <h3>Context Parallelism</h3>
908
-
909
- <p>Even though TP-SP mode helps reduce the memory used by activation values, it has two main drawbacks:</p>
910
- <ol>
911
- <li>Internode connections are usually slow, so the TP degree shouldn't typically exceed 8</li>
912
- <li>The TP degree is limited by the number of Key/Value heads, which is 8 for LLaMA 3 8B.</li>
913
- </ol>
914
-
915
- <p>An empirical estimation is that with TP=8, you can only train an 8B model with a 20K context length. However,
916
- LLaMA 3.1 has managed to scale the context length to 128K by using context parallelism.</p>
917
-
918
- <p>There are several ways to implement context parallelism. We used ring attention, which overlaps
919
- communication and computation. LLaMA3.1 uses all-gather along the sequence dimension because it is easier
920
- and more flexible to support different types of attention masks in all-gather based CP attention, such as
921
- the document mask.</p>
922
-
923
- <h3>Pipeline Parallelism</h3>
924
-
925
- <h3>Overlapping computation and communication</h3>
926
-
927
- <h3>ZeRO</h3>
928
-
929
- <h2>II – Architecture</h2>
930
-
931
- <h3>Transformers</h3>
932
-
933
- <h3>Choosing the right dimensions</h3>
934
-
935
- <h3>Positional Embeddings (Learned, RoPE, ALiBi)</h3>
936
-
937
- <h3>RoPE</h3>
938
-
939
- <p>In the transformer model, tokens have no inherent information about their positional information. For these
940
- reasons, we need to use a positional encoding function.</p>
941
-
942
- <p>Assuming that in the multi-head attention layer, <em>q_m</em> is the "position-aware" query vector
943
- corresponding to a token at position <em>m</em>, <em>k_n</em> the "position-aware" key vector corresponding
944
- to the token at position <em>n</em> and <em>f</em> is our position embedding function, we would like our
945
- position vector to be a function of the input vectors and absolute positions like this:</p>
946
-
947
- <d-math>
948
- q_m = f(q,m)
949
- k_n = f(k,n)
950
- </d-math>
951
-
952
- <p>We may also want the positional encoding to model relative positional information between two input tokens.
953
- Relative positions help the model to operate across longer context spans and even context lengths not seen
954
- during training. The attention operation is generally a dot product operation between "position-aware"
955
- vectors <em>q</em> and <em>k</em>, so for a positional encoding that contains relative positional
956
- information, we'll want to have:</p>
957
-
958
- <d-math>
959
- <q_m, k_n> = g(q, k, m-n)
960
- </d-math>
961
-
962
- <p>In other words, we want the result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> to depend on the values of <em>q</em> and
963
- <em>k</em> themselves, as well as their relative position <em>m − n</em>, but not <em>m</em> and <em>n</em>.
964
- This way, the model can focus on the relative difference between two tokens rather than their absolute
965
- positions.
966
- </p>
967
-
968
- <p>Let's show that the RoPE positional embedding formulation satisfies the above formula.</p>
969
-
970
- <p><strong>Rotation matrix</strong></p>
971
-
972
- <p>RoPE are based on rotation matrices which have simple and interesting properties for us. In a 2D space, a
973
- rotation matrix has the following form:</p>
974
-
975
- <d-math>
976
- R(θ) =
977
- \begin{pmatrix}
978
- \cosθ & -\sinθ \\
979
- \sinθ & \cosθ
980
- \end{pmatrix}
981
- </d-math>
982
-
983
- <p>The rotation matrix has the following properties:</p>
984
- <ul>
985
- <li><em>R(θ)</em><sup>T</sup> = <em>R(-θ)</em></li>
986
- <li><em>R(θ<sub>1</sub>)R(θ<sub>2</sub>) = R(θ<sub>1</sub>+θ<sub>2</sub>)</em></li>
987
- </ul>
988
-
989
- <img src="assets/images/rotation.jpeg" alt="Rotation Matrix">
990
-
991
- <p><strong>RoPE in 2D space</strong></p>
992
-
993
- <p>Assuming <em>q</em> and <em>k</em> are 2D column vectors, we can show that:</p>
994
-
995
- <d-math>
996
- \langle R(θ_1)q, R(θ_2)k \rangle = (R(θ_1)q)^T (R(θ_2)k) = q^T R(-θ_1)R(θ_2)k =
997
- q^T R(θ_2-θ_1)k = (R(θ_1-θ_2)q)^T k = \langle R(θ_1-θ_2)q, k \rangle
998
- </d-math>
999
-
1000
- <p>Therefore, if we define our position embedding like this: <em>f(x, m) = R(mθ)x</em> where <em>R</em> is a 2D
1001
- rotation matrix, we have <em>q_m = R(mθ)q</em> and <em>k_n = R(nθ)k</em> and then:</p>
1002
-
1003
- <d-math>
1004
- \langle q_m, k_n \rangle = \langle R(mθ)q, R(nθ)k \rangle = \langle R((m-n)θ)q, k \rangle
1005
- </d-math>
1006
-
1007
- <p>We can see that a multiplication with a rotation matrix is exactly the positional encoding we were looking
1008
- for. The result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> only depends on <em>q</em>, <em>k</em> and <em>m-n</em>.</p>
1009
-
1010
- <p><strong>Implementation</strong></p>
1011
-
1012
- <p>In our case, our internal vectors (the activations in our model) have much more than two elements. Let's pair
1013
- elements to get 2D vectors and apply the 2D rotation operation on these pairs.</p>
1014
-
1015
- <p>There are combinatorially many ways we can pair elements but generally two options are the most popular for
1016
- implementing RoPE: we call them the <em>interleaved</em> and <em>non-interleaved</em> versions. (It's still
1017
- rather unfortunate to have two popular options)</p>
1018
-
1019
- <ol>
1020
- <li>In the interleaved version, we pair consecutive elements <em>(x<sub>0</sub>,
1021
- x<sub>1</sub>),(x<sub>2</sub>,x<sub>3</sub>),…</em> before applying the rotation matrix:</li>
1022
- <d-math>
1023
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
1024
- x_0 \\
1025
- x_1 \\
1026
- x_2 \\
1027
- x_3 \\
1028
- \vdots \\
1029
- x_{d-2} \\
1030
- x_{d-1}
1031
- \end{pmatrix}
1032
- \odot
1033
- \begin{pmatrix}
1034
- \cos mθ_0 \\
1035
- \cos mθ_0 \\
1036
- \cos mθ_1 \\
1037
- \cos mθ_1 \\
1038
- \vdots \\
1039
- \cos mθ_{d/2-1} \\
1040
- \cos mθ_{d/2-1}
1041
- \end{pmatrix}
1042
- +
1043
- \begin{pmatrix}
1044
- -x_1 \\
1045
- x_0 \\
1046
- -x_3 \\
1047
- x_2 \\
1048
- \vdots \\
1049
- -x_{d-1} \\
1050
- x_{d-2}
1051
- \end{pmatrix}
1052
- \odot
1053
- \begin{pmatrix}
1054
- \sin mθ_0 \\
1055
- \sin mθ_0 \\
1056
- \sin mθ_1 \\
1057
- \sin mθ_1 \\
1058
- \vdots \\
1059
- \sin mθ_{d/2-1} \\
1060
- \sin mθ_{d/2-1}
1061
- \end{pmatrix}
1062
- </d-math>
1063
- <d-math>
1064
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
1065
- x_0\cos mθ_0 - x_1\sin mθ_0 \\
1066
- x_1\cos mθ_0 + x_0\sin mθ_0 \\
1067
- x_2\cos mθ_1 - x_3\sin mθ_1 \\
1068
- x_3\cos mθ_1 + x_2\sin mθ_1 \\
1069
- \vdots \\
1070
- x_{d-2}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
1071
- x_{d-1}\cos mθ_{d/2-1} + x_{d-2}\sin mθ_{d/2-1}
1072
- \end{pmatrix}
1073
- </d-math>
1074
- <li>In the non-interleaved version, we split the vector in two to pair elements as follows:
1075
- <em>(x<sub>0</sub>, x<sub>d/2</sub>),(x<sub>1</sub>,x<sub>d/2+1</sub>),…</em> This is the implementation
1076
- used in the <code>transformers</code> library:
1077
- </li>
1078
- <d-math>
1079
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
1080
- x_0 \\
1081
- x_1 \\
1082
- \vdots \\
1083
- x_{d/2-1} \\
1084
- x_{d/2} \\
1085
- x_{d/2+1} \\
1086
- \vdots \\
1087
- x_{d-1}
1088
- \end{pmatrix}
1089
- \odot
1090
- \begin{pmatrix}
1091
- \cos mθ_0 \\
1092
- \cos mθ_1 \\
1093
- \vdots \\
1094
- \cos mθ_{d/2-1} \\
1095
- \cos mθ_{0} \\
1096
- \cos mθ_{1} \\
1097
- \vdots \\
1098
- \cos mθ_{d/2-1}
1099
- \end{pmatrix}
1100
- +
1101
- \begin{pmatrix}
1102
- -x_{d/2} \\
1103
- -x_{d/2+1} \\
1104
- \vdots \\
1105
- -x_{d-1} \\
1106
- x_{0} \\
1107
- x_{1} \\
1108
- \vdots \\
1109
- x_{d/2-1}
1110
- \end{pmatrix}
1111
- \odot
1112
- \begin{pmatrix}
1113
- \sin mθ_0 \\
1114
- \sin mθ_1 \\
1115
- \vdots \\
1116
- \sin mθ_{d/2-1} \\
1117
- \sin mθ_{0} \\
1118
- \sin mθ_{1} \\
1119
- \vdots \\
1120
- \sin mθ_{d/2-1}
1121
- \end{pmatrix}
1122
- </d-math>
1123
- <d-math>
1124
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
1125
- x_0\cos mθ_0 - x_{d/2}\sin mθ_0 \\
1126
- x_1\cos mθ_1 - x_{d/2+1}\sin mθ_1 \\
1127
- \vdots \\
1128
- x_{d/2-1}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
1129
- x_{d/2}\cos mθ_0 + x_0\sin mθ_0 \\
1130
- x_{d/2+1}\cos mθ_1 + x_1\sin mθ_1 \\
1131
- \vdots \\
1132
- x_{d-1}\cos mθ_{d/2-1} + x_{d/2-1}\sin mθ_{d/2-1} \\
1133
- \end{pmatrix}
1134
- </d-math>
1135
- <p>The angle of rotation, <em>θ<sub>i</sub></em> is defined as follows, where <em>d</em> is the dimension of
1136
- the attention head:</p>
1137
- <d-math>
1138
- θ<sub>i</sub> = base<sup>-2(i-1)/d</sup>, i \in [1,2,...,d/2]
1139
- </d-math>
1140
- <p>How does this look? When moving the same distance, vectors in some dimensions rotate faster than vectors
1141
- in other dimensions.</p>
1142
- <img src="assets/images/rotation_speed.jpeg" alt="Rotation Speed">
1143
  </ol>
 
 
 
 
 
1144
 
1145
- <h3>Attention (MHA, MQA, GQA)</h3>
1146
-
1147
- <h2>Optimized Operations</h2>
1148
-
1149
- <h3>Flash Attention 1&2&3</h3>
1150
-
1151
- <h3>Fused Kernels</h3>
1152
-
1153
- <h2>III – Training Recipe</h2>
1154
-
1155
- <h3>Batch Size</h3>
1156
-
1157
- <h3>Initialization + rescaling activations inside the model</h3>
1158
-
1159
- <h3>Numerical Precision</h3>
1160
-
1161
- <h4>FP16/BF16/FP8</h4>
1162
-
1163
- <p>@Phuc Nguyen?</p>
1164
-
1165
- <h3>Long Context Training</h3>
1166
-
1167
- <h3>Evaluation</h3>
1168
-
1169
- <p>@Haojun Zhao</p>
1170
-
1171
- <h3>Infini-Attention</h3>
1172
-
1173
- <p>@Phuc Nguyen</p>
1174
-
1175
- <h3>Ring Attention</h3>
1176
-
1177
- <p>@Haojun Zhao</p>
1178
-
1179
- <h3>RoPE scaling / Yarn</h3>
1180
-
1181
- <p>@Haojun Zhao maybe?</p>
1182
-
1183
- <h2>References</h2>
1184
-
1185
- <ul>
1186
- <li>Harm's posts:
1187
- <ul>
1188
- <li><a
1189
- href="https://www.harmdevries.com/post/context-length/">https://www.harmdevries.com/post/context-length/</a>
1190
- </li>
1191
- <li><a
1192
- href="https://www.harmdevries.com/post/model-size-vs-compute-overhead/">https://www.harmdevries.com/post/model-size-vs-compute-overhead/</a>
1193
- </li>
1194
- </ul>
1195
- </li>
1196
- <li>Stas' guides:
1197
- <ul>
1198
- <li><a href="https://github.com/stas00/ml-engineering">https://github.com/stas00/ml-engineering</a>
1199
- </li>
1200
- <li><a
1201
- href="https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md">https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md</a>
1202
- </li>
1203
- </ul>
1204
- </li>
1205
- <li>data parallel: <a
1206
- href="https://siboehm.com/articles/22/data-parallel-training">https://siboehm.com/articles/22/data-parallel-training</a>
1207
- </li>
1208
- <li>ZeRO: <a href="https://arxiv.org/abs/1910.02054">https://arxiv.org/abs/1910.02054</a></li>
1209
- <li>TP/SP + Selective Recomputation: <a
1210
- href="https://arxiv.org/abs/2205.05198">https://arxiv.org/abs/2205.05198</a></li>
1211
- </ul>
1212
- <h2>Conclusion and looking forward</h2>
1213
- <p>Through our open science efforts we hope to keep shining a light on the black box that is the training of
1214
- high performance large language models as well as to give every model trainer the ability to create
1215
- state-of-the-art LLMs. We are excited to continue iterating on FineWeb and to release increasingly better
1216
- filtered subsets of web data, in a fully open and reproducible manner.</p>
1217
- <p>In the short term, we are looking forward to applying the learnings from (English) FineWeb to other
1218
- languages. While English currently dominates the LLM landscape, we believe that making high quality web data
1219
- in other languages as accessible as possible would be incredibly impactful.</p>
1220
- <p>In a nutshell: the future is bright and exciting for studying the science of creating datasets at scale and
1221
- in the open 🤗.</p>
1222
  </d-article>
1223
 
1224
  <d-appendix>
@@ -1244,16 +238,12 @@
1244
  <h3 id="citation">Citation</h3>
1245
  <p>For attribution in academic contexts, please cite this work as</p>
1246
  <pre
1247
- class="citation short">Penedo, et al., "The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale", 2024.</pre>
1248
  <p>BibTeX citation</p>
1249
- <pre class="citation long">@misc{penedo2024finewebdatasetsdecantingweb,
1250
- title={The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale},
1251
- author={Guilherme Penedo and Hynek Kydlíček and Loubna Ben allal and Anton Lozhkov and Margaret Mitchell and Colin Raffel and Leandro Von Werra and Thomas Wolf},
1252
- year={2024},
1253
- eprint={2406.17557},
1254
- archivePrefix={arXiv},
1255
- primaryClass={cs.CL}
1256
- url={https://arxiv.org/abs/2406.17557},
1257
  }</pre>
1258
  </d-appendix>
1259
 
 
7
  <meta name="viewport" content="width=device-width, initial-scale=1">
8
  <meta charset="utf8">
9
  <base target="_blank">
10
+ <title>The Ultra-Scale Playbook: Training LLMs on GPU Clusters</title>
11
  <link rel="stylesheet" href="style.css">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  </head>
13
 
14
  <body>
15
  <d-front-matter>
16
  <script id='distill-front-matter' type="text/json">{
17
+ "title": "The Ultra-Scale Playbook: Training LLMs on GPU Clusters",
18
+ "description": "This blog covers everything about scaling LLMs in 2025.",
19
  "published": "Sept 28, 2024",
20
  "affiliation": {"name": "HuggingFace"},
21
  "authors": [
 
37
  </script>
38
  </d-front-matter>
39
  <d-title>
40
+ <h1 class="l-page" style="text-align: center;">The Ultra-Scale Playbook: Training LLMs on GPU Clusters</h1>
41
  <div id="title-plot" class="main-plot-container l-screen">
42
  <figure>
43
  <img src="assets/images/banner.png" alt="FineWeb">
 
51
  <d-article>
52
  <d-contents>
53
  </d-contents>
54
+
55
+ <p>Fueled by the scaling laws<d-cite bibtex-key="kaplan2020scalinglaws"></d-cite><d-cite bibtex-key="hoffmann2022chinchilla"></d-cite>, the trend of training ever larger language models on vaster amounts of data has been driving progress in AI for the past couple years. Initially, the development of the largest models happened exclusively behind closed doors of a handful of research labs but recently opened up more with the release of models such as Llama 3.1 405B<d-cite bibtex-key="grattafiori2024llama3herdmodels"></d-cite> and DeepSeek R1<d-cite bibtex-key="deepseekai2024deepseekv3technicalreport"></d-cite>. While these models have <a href="https://huggingface.co/meta-llama">openly shared</a> <a href="https://huggingface.co/deepseek-ai">weights</a> and their training recipes are described in <a href="https://ai.meta.com/research/publications/the-llama-3-herd-of-models/">technical</a> <a href="https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf">reports</a>, the challenging engineering involved in training at the necessary infrastructure scale is still hidden between the lines of a handful of papers and complex training frameworks. This ~~long blog post~~ open-source book is here to open this black box!</p>
56
 
 
 
 
 
57
  <aside>Reading time: 7 days. For the best reading experience, we recommend not using a mobile phone.</aside>
58
 
59
+ <p>In this book we invite you to follow us in the wonderful world of scaling training of Large Language Models to tens, hundreds, thousands of GPUs. It assumes you know the basics on LLM architecture and training, but are new to distributed training. This writing can be seen as a second part of a trilogy following our first blog on processing data for pre-training, the so-called “<a href="https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1">FineWeb blog post</a>”. Having read both blog posts, you should have almost all the core knowledge needed to deeply understand how LLMs are being built nowadays, missing only a few final spices, like data mixing or architecture choices, to complete the recipe (stay tuned…).</p>
60
+
61
+ <p>Pre-training LLMs from scratch now requires amounts of compute which exceed in almost every case the use of a single GPU or machine. The clusters used to train these models range from hundreds to thousands of nodes each usually equipped with 4 to 8 GPUs. To make the best use of such an expensive hardware as well as to train in a reasonable time, a range of distributed training methods have been developed with the goal of ensuring that GPUs are highly utilized at all times. Efficiently scaling LLM training is also not confined to pretraining anymore, as fine-tuning larger models on more domain specific data is becoming the standard practice to achieve the best results.</p>
 
 
62
 
63
  <aside>We are extremely thankful to the whole <a href="https://distill.pub/">distill.pub</a> team for creating
64
  the template on which we based this blog post.</aside>
65
+
66
+ <p>In this post we’ll cover these scaling methods exhaustively while keeping a single story-line to understand where each technique comes from. We’ll cover data, tensor, pipeline and context parallelism as well as ZeRO and kernel fusion. The post is built on the following <strong>three foundations</strong>:</p>
67
+
68
+ <p><strong>Quick intros on theory and concepts:</strong> before diving into code and experiments, we want to understand how each method works at a high level and what its advantages and limits are. You’ll learn about which parts of a language model eat away your memory and when during training it happens. You’ll learn how we can solve memory constraints by parallelizing the models and increase the throughput by scaling up GPUs. As a result you'll understand how the following widget to compute the memory breakdown of a transformer model works: </p>
69
 
70
+ <div id="graph"></div>
71
  <div id="controls">
72
+ <div class="cell column-1">
73
+ <label for="a">Attention Heads (a):</label>
74
+ <input type="range" id="a" name="a" min="1" max="128" value="8">
75
+ <input type="number" id="a_input" value="8" min="1" max="128">
 
 
 
 
 
 
 
76
  </div>
77
+ <div class="cell column-2">
78
+ <label for="mixed">Mixed Precision:</label>
79
+ <input type="checkbox" id="mixed" name="mixed" checked>
80
+ <span></span> <!-- Empty span to maintain grid alignment -->
 
 
 
 
 
 
 
81
  </div>
82
+ <div class="cell column-1">
83
+ <label for="b">Micro Batch Size (b):</label>
84
+ <input type="range" id="b" name="b" min="1" max="53248" value="32">
85
+ <input type="number" id="b_input" value="32" min="1" max="53248">
86
+ </div>
87
+ <div class="cell column-2">
88
+ <label for="seq_parallel">Sequence Parallelism:</label>
89
+ <input type="checkbox" id="seq_parallel" name="seq_parallel">
90
+ <span></span> <!-- Empty span to maintain grid alignment -->
91
+ </div>
92
+ <div class="cell column-1">
93
+ <label for="h">Hidden Dimension (h):</label>
94
+ <input type="range" id="h" name="h" min="1" max="16384" value="512">
95
+ <input type="number" id="h_input" value="512" min="128" max="16384">
96
+ </div>
97
+ <div class="cell column-2">
98
+ <label for="recomputation">Recomputation:</label>
99
+ <select id="recomputation" name="recomputation">
100
+ <option value="none">None</option>
101
+ <option value="selective">Selective</option>
102
+ <option value="full">Full</option>
103
+ </select>
104
+ <span></span> <!-- Empty span to maintain grid alignment -->
105
+ </div>
106
+ <div class="cell column-1">
107
+ <label for="h_ff">Feedforward Dimension (h_ff):</label>
108
+ <input type="range" id="h_ff" name="h_ff" min="1" max="65536" value="2048">
109
+ <input type="number" id="h_ff_input" value="2048" min="512" max="65536">
110
  </div>
111
+ <div class="cell column-2">
112
+ <label for="zero">Zero:</label>
113
+ <select id="zero" name="zero">
114
+ <option value="0">0</option>
115
+ <option value="1">1</option>
116
+ <option value="2">2</option>
117
+ <option value="3">3</option>
118
+ </select>
119
+ <span></span> <!-- Empty span to maintain grid alignment -->
 
 
 
 
 
 
 
120
  </div>
121
+ <div class="cell column-1">
122
+ <label for="L">Number of Layers (L):</label>
123
+ <input type="range" id="L" name="L" min="1" max="126" value="12">
124
+ <input type="number" id="L_input" value="12" min="1" max="126">
 
 
 
 
 
 
 
 
 
 
 
125
  </div>
126
+ <div class="cell column-2">
127
+ <label for="ff_activation">FF Activation:</label>
128
+ <select id="ff_activation" name="ff_activation">
129
+ <option value="relu">ReLU</option>
130
+ <option value="gelu">GELU</option>
131
+ <option value="swiglu">SwiGLU</option>
132
+ </select>
133
+ <span></span> <!-- Empty span to maintain grid alignment -->
 
 
 
 
 
 
 
 
134
  </div>
135
+ <div class="cell column-1">
136
+ <label for="s">Sequence Length (s):</label>
137
+ <input type="range" id="s" name="s" min="1" max="128000" value="128">
138
+ <input type="number" id="s_input" value="128" min="64" max="128000">
 
 
 
 
 
 
 
139
  </div>
140
+ <div class="cell column-2">
141
+ <label for="presets">Presets:</label>
142
+ <select id="presets" name="presets">
143
+ <option value="Llama 3 Tiny">Llama 3 Tiny</option>
144
+ <option value="Llama 3 8B">Llama 3 8B</option>
145
+ <option value="Llama 3 70B">Llama 3 70B</option>
146
+ <option value="Llama 3 405B">Llama 3 405B</option>
147
+ </select>
148
+ <span></span> <!-- Empty span to maintain grid alignment -->
149
+ </div>
150
+ <div class="cell column-1">
151
+ <label for="v">Vocabulary Size (v):</label>
152
+ <input type="range" id="v" name="v" min="1000" max="100000" value="30522">
153
+ <input type="number" id="v_input" value="30522" min="1000" max="100000">
154
+ </div>
155
+ <div class="cell column-2">
156
+ <label for="tp">Tensor Parallelism (t):</label>
157
+ <input type="range" id="tp" name="tp" min="1" max="16" value="8">
158
+ <input type="number" id="tp_input" value="8" min="1" max="16">
159
+ </div>
160
+ <div class="cell column-1">
161
+ <label for="k">Optimizer Parameters (k):</label>
162
+ <input type="range" id="k" name="k" min="1" max="16" value="8">
163
+ <input type="number" id="k_input" value="8" min="1" max="16">
164
+ </div>
165
+ <div class="cell column-2">
166
+ <label for="dp">Data Parallelism (d):</label>
167
+ <input type="range" id="dp" name="dp" min="1" max="256" value="1">
168
+ <input type="number" id="dp_input" value="1" min="1" max="256">
169
  </div>
170
  </div>
171
 
172
+ <p>While this widget gives a theoretical breakdown the following tool can be used to predict the memory usage:</p>
173
+
174
+ <p><img alt="image.png" src="assets/images/placeholder.png"/></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
+ <p><strong>Clear code implementations:</strong> theory is one thing, but we discover all kinds of edge cases and important details when we implement something. That’s why we link to implementation references where possible. Depending on the case, we’ll use two code references: the <a href="https://github.com/huggingface/picotron">picotron</a> repository is built for education, thus it implements concepts usually in single, self-contained short files. On the other hand, to look at production ready code, we’ll refer to the <a href="https://github.com/huggingface/nanotron">nanotron</a> implementations which is a production training codebase used at Hugging Face.</p>
177
 
178
+ <p><img alt="Picotron implements each key concept in a self-contained way, such that the method can be studied separately and in isolation." src="assets/images/placeholder.png" /></p>
 
 
 
 
179
 
180
+ <p><strong>Real training efficiency benchmarks:</strong> Finally, how to <em>actually</em> scale your LLM training depends on your infrastructure, such as the kind of chips, interconnect etc., and we can’t give a single unified recipe. What we will give though is a way to benchmark several setups and it is what we have done on our cluster! We ran over 4100 distributed experiments with up to 512 GPUs to scan many possible distributed training layouts and model sizes. TODO: link to dataset too </p>
 
 
 
 
 
181
 
182
+ <p><img alt="An overview of the over 4000 experiments across all Llama architectures where each data point corresponds to an experiment launch." src="assets/images/placeholder.png" /></p>
 
 
183
 
184
+ <p>As you can see, there’s a lot of ground to be covered. Before getting into the trenches of distributed training, let’s take a quick high-level look at what we’ll cover in this post.</p>
 
185
 
186
+ <h2>TL;DR</h2>
187
+ <p>This book is very extensive so we decided to start with a very general overview of how you can think about distributed training. At a high level, the key challenge in scaling LLM training is to make a training step (forward/backward/optimizer step) with a large batch size as fast as possible.</p>
188
+ <p>When scaling up models and input batches, we quickly end up in situations where either our target batch size won't fit in memory, and/or the model itself is too large to fit in a single GPU's memory.</p>
189
+ <p>To solve this scaling issue we’ll need to carefully evaluate different parallelization strategies and find the optimal balance between three main factors:</p>
190
  <ol>
191
+ <li><strong>Memory Usage</strong><ul>
192
+ <li>Hard limitation - if a training step doesn't fit in memory, training cannot proceed</li>
193
+ <li>Sometimes we can increase compute (e.g. recomputation) or increase communication (e.g. ZeRO) to reduce memory usage</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  </ul>
195
+ </li>
196
+ <li><strong>Compute Efficiency</strong><ul>
197
+ <li>Memory transfer can also decrease compute efficiency.</li>
198
+ <li>We want our hardware to spend most time computing, so we need to reduce time spent on data transfers or unoptimized kernels.</li>
199
+ <li>GPUs need sufficient workload (large enough matrices/batch sizes) to maintain high utilization (compute-bound) otherwise they become memory-bound (limited by memory bandwidth).</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  </ul>
201
+ </li>
202
+ <li><strong>Communication overhead</strong><ul>
203
+ <li>Two main types. For GPUs: intra-node (NVLink TODO: bandwidth) and inter-node (network TODO: bandwidth)</li>
204
+ <li>Two main attributes: base latency and peak bandwidth. Base latency is a constant overhead that makes us want to do the least number of comms possible, and peak bandwidth controls how fast we can move data between GPUs</li>
205
+ <li>We prioritize using the fastest communication channels (like NVLink) for operations that occur frequently and/or block computation (e.g. tensor parallelism)</li>
206
+ <li>We want to minimize communication overhead as it keeps GPUs idle, so we try to overlap communication with compute as much as possible</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
  </ul>
208
+ </li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  </ol>
210
+ <p>But let’s not get too far ahead of ourselves and instead scale progressively. To guide you along the journey and as a practical reference we summarized the key concepts in a cheatsheet:</p>
211
+ <p>[TODO: ADD CHEATSHEET]</p>
212
+ <p>Now that we have nailed a few key concepts and terms, let’s get started by revisiting the basic training steps of an LLM!</p>
213
+
214
+ <h2>First Steps: Training on one GPU</h2>
215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  </d-article>
217
 
218
  <d-appendix>
 
238
  <h3 id="citation">Citation</h3>
239
  <p>For attribution in academic contexts, please cite this work as</p>
240
  <pre
241
+ class="citation short">XXX, et al., "The Ultra-Scale Playbook: Training LLMs on GPU Clusters", 2025.</pre>
242
  <p>BibTeX citation</p>
243
+ <pre class="citation long">@misc{TODO,
244
+ title={The Ultra-Scale Playbook: Training LLMs on GPU Clusters},
245
+ author={TODO},
246
+ year={2025},
 
 
 
 
247
  }</pre>
248
  </d-appendix>
249
 
dist/main.bundle.js CHANGED
The diff for this file is too large to render. See raw diff
 
dist/main.bundle.js.map CHANGED
The diff for this file is too large to render. See raw diff
 
dist/style.css CHANGED
@@ -246,6 +246,10 @@ d-article aside {
246
  margin-bottom: 1em;
247
  }
248
 
 
 
 
 
249
  @media (min-width: 768px) {
250
  d-article aside {
251
  margin-bottom: 0;
@@ -257,3 +261,78 @@ d-contents nav > ul > li > a:hover {
257
  text-decoration: none;
258
  }
259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
  margin-bottom: 1em;
247
  }
248
 
249
+ d-article img {
250
+ max-width: 100%;
251
+ }
252
+
253
  @media (min-width: 768px) {
254
  d-article aside {
255
  margin-bottom: 0;
 
261
  text-decoration: none;
262
  }
263
 
264
+ /* memory */
265
+ #controls {
266
+ display: grid;
267
+ grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
268
+ column-gap: 10px;
269
+ margin-bottom: 20px;
270
+ max-width: 100%;
271
+ @supports (container-type: inline-size) {
272
+ container-type: inline-size;
273
+ }
274
+ }
275
+
276
+ #controls .cell {
277
+ padding: 1px;
278
+ box-sizing: border-box;
279
+ }
280
+
281
+ #controls .column-1 {
282
+ display: flex;
283
+ align-items: center;
284
+ }
285
+
286
+ #controls .column-2 {
287
+ display: flex;
288
+ align-items: center;
289
+ }
290
+ @supports (container-type: inline-size) {
291
+ @container (max-width: 600px) {
292
+ #controls .column-2 {
293
+ order: 2;
294
+ }
295
+ }
296
+ }
297
+
298
+ #controls label {
299
+ text-align: right;
300
+ padding-right: 10px;
301
+ flex: 0 0 auto;
302
+ width: 150px;
303
+ line-height: 1.5em;
304
+ font-size: 0.8em;
305
+ }
306
+
307
+ #controls input[type="range"] {
308
+ width: 50%;
309
+ margin: 0 10px;
310
+ }
311
+
312
+ #controls input[type="number"] {
313
+ flex-shrink: 0;
314
+ width: 60px;
315
+ height: 24px;
316
+ border: 1px solid var(--distill-gray-light);
317
+ border-radius: 0.2rem;
318
+ }
319
+
320
+ #controls select {
321
+ width: 100%;
322
+ min-height: 28px;
323
+ border: 1px solid var(--distill-gray-light);
324
+ border-radius: 0.2rem;
325
+ }
326
+
327
+ #controls .column {
328
+ display: contents;
329
+ }
330
+
331
+ #graph svg {
332
+ font-family: sans-serif;
333
+ }
334
+
335
+ #graph svg rect {
336
+ cursor: pointer;
337
+ }
338
+
package-lock.json CHANGED
The diff for this file is too large to render. See raw diff
 
src/bibliography.bib CHANGED
@@ -331,4 +331,40 @@ url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
331
  eprint={2212.05129},
332
  archivePrefix={arXiv},
333
  primaryClass={cs.AI}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334
  }
 
331
  eprint={2212.05129},
332
  archivePrefix={arXiv},
333
  primaryClass={cs.AI}
334
+ }
335
+ @misc{kaplan2020scalinglaws,
336
+ title={Scaling Laws for Neural Language Models},
337
+ author={Jared Kaplan and Sam McCandlish and Tom Henighan and Tom B. Brown and Benjamin Chess and Rewon Child and Scott Gray and Alec Radford and Jeffrey Wu and Dario Amodei},
338
+ year={2020},
339
+ eprint={2001.08361},
340
+ archivePrefix={arXiv},
341
+ primaryClass={cs.LG},
342
+ url={https://arxiv.org/abs/2001.08361},
343
+ }
344
+ @misc{hoffmann2022chinchilla,
345
+ title={Training Compute-Optimal Large Language Models},
346
+ author={Jordan Hoffmann and Sebastian Borgeaud and Arthur Mensch and Elena Buchatskaya and Trevor Cai and Eliza Rutherford and Diego de Las Casas and Lisa Anne Hendricks and Johannes Welbl and Aidan Clark and Tom Hennigan and Eric Noland and Katie Millican and George van den Driessche and Bogdan Damoc and Aurelia Guy and Simon Osindero and Karen Simonyan and Erich Elsen and Jack W. Rae and Oriol Vinyals and Laurent Sifre},
347
+ year={2022},
348
+ eprint={2203.15556},
349
+ archivePrefix={arXiv},
350
+ primaryClass={cs.CL},
351
+ url={https://arxiv.org/abs/2203.15556},
352
+ }
353
+ @misc{grattafiori2024llama3herdmodels,
354
+ title={The Llama 3 Herd of Models},
355
+ author={Aaron Grattafiori and Abhimanyu Dubey and Abhinav Jauhri and Abhinav Pandey and Abhishek Kadian and Ahmad Al-Dahle and Aiesha Letman and Akhil Mathur and Alan Schelten and Alex Vaughan and Amy Yang and Angela Fan and Anirudh Goyal and Anthony Hartshorn and Aobo Yang and Archi Mitra and Archie Sravankumar and Artem Korenev and Arthur Hinsvark and Arun Rao and Aston Zhang and Aurelien Rodriguez and Austen Gregerson and Ava Spataru and Baptiste Roziere and Bethany Biron and Binh Tang and Bobbie Chern and Charlotte Caucheteux and Chaya Nayak and Chloe Bi and Chris Marra and Chris McConnell and Christian Keller and Christophe Touret and Chunyang Wu and Corinne Wong and Cristian Canton Ferrer and Cyrus Nikolaidis and Damien Allonsius and Daniel Song and Danielle Pintz and Danny Livshits and Danny Wyatt and David Esiobu and Dhruv Choudhary and Dhruv Mahajan and Diego Garcia-Olano and Diego Perino and Dieuwke Hupkes and Egor Lakomkin and Ehab AlBadawy and Elina Lobanova and Emily Dinan and Eric Michael Smith and Filip Radenovic and Francisco Guzmán and Frank Zhang and Gabriel Synnaeve and Gabrielle Lee and Georgia Lewis Anderson and Govind Thattai and Graeme Nail and Gregoire Mialon and Guan Pang and Guillem Cucurell and Hailey Nguyen and Hannah Korevaar and Hu Xu and Hugo Touvron and Iliyan Zarov and Imanol Arrieta Ibarra and Isabel Kloumann and Ishan Misra and Ivan Evtimov and Jack Zhang and Jade Copet and Jaewon Lee and Jan Geffert and Jana Vranes and Jason Park and Jay Mahadeokar and Jeet Shah and Jelmer van der Linde and Jennifer Billock and Jenny Hong and Jenya Lee and Jeremy Fu and Jianfeng Chi and Jianyu Huang and Jiawen Liu and Jie Wang and Jiecao Yu and Joanna Bitton and Joe Spisak and Jongsoo Park and Joseph Rocca and Joshua Johnstun and Joshua Saxe and Junteng Jia and Kalyan Vasuden Alwala and Karthik Prasad and Kartikeya Upasani and Kate Plawiak and Ke Li and Kenneth Heafield and Kevin Stone and Khalid El-Arini and Krithika Iyer and Kshitiz Malik and 
Kuenley Chiu and Kunal Bhalla and Kushal Lakhotia and Lauren Rantala-Yeary and Laurens van der Maaten and Lawrence Chen and Liang Tan and Liz Jenkins and Louis Martin and Lovish Madaan and Lubo Malo and Lukas Blecher and Lukas Landzaat and Luke de Oliveira and Madeline Muzzi and Mahesh Pasupuleti and Mannat Singh and Manohar Paluri and Marcin Kardas and Maria Tsimpoukelli and Mathew Oldham and Mathieu Rita and Maya Pavlova and Melanie Kambadur and Mike Lewis and Min Si and Mitesh Kumar Singh and Mona Hassan and Naman Goyal and Narjes Torabi and Nikolay Bashlykov and Nikolay Bogoychev and Niladri Chatterji and Ning Zhang and Olivier Duchenne and Onur Çelebi and Patrick Alrassy and Pengchuan Zhang and Pengwei Li and Petar Vasic and Peter Weng and Prajjwal Bhargava and Pratik Dubal and Praveen Krishnan and Punit Singh Koura and Puxin Xu and Qing He and Qingxiao Dong and Ragavan Srinivasan and Raj Ganapathy and Ramon Calderer and Ricardo Silveira Cabral and Robert Stojnic and Roberta Raileanu and Rohan Maheswari and Rohit Girdhar and Rohit Patel and Romain Sauvestre and Ronnie Polidoro and Roshan Sumbaly and Ross Taylor and Ruan Silva and Rui Hou and Rui Wang and Saghar Hosseini and Sahana Chennabasappa and Sanjay Singh and Sean Bell and Seohyun Sonia Kim and Sergey Edunov and Shaoliang Nie and Sharan Narang and Sharath Raparthy and Sheng Shen and Shengye Wan and Shruti Bhosale and Shun Zhang and Simon Vandenhende and Soumya Batra and Spencer Whitman and Sten Sootla and Stephane Collot and Suchin Gururangan and Sydney Borodinsky and Tamar Herman and Tara Fowler and Tarek Sheasha and Thomas Georgiou and Thomas Scialom and Tobias Speckbacher and Todor Mihaylov and Tong Xiao and Ujjwal Karn and Vedanuj Goswami and Vibhor Gupta and Vignesh Ramanathan and Viktor Kerkez and Vincent Gonguet and Virginie Do and Vish Vogeti and Vítor Albiero and Vladan Petrovic and Weiwei Chu and Wenhan Xiong and Wenyin Fu and Whitney Meers and Xavier Martinet and Xiaodong Wang and Xiaofang 
Wang and Xiaoqing Ellen Tan and Xide Xia and Xinfeng Xie and Xuchao Jia and Xuewei Wang and Yaelle Goldschlag and Yashesh Gaur and Yasmine Babaei and Yi Wen and Yiwen Song and Yuchen Zhang and Yue Li and Yuning Mao and Zacharie Delpierre Coudert and Zheng Yan and Zhengxing Chen and Zoe Papakipos and Aaditya Singh and Aayushi Srivastava and Abha Jain and Adam Kelsey and Adam Shajnfeld and Adithya Gangidi and Adolfo Victoria and Ahuva Goldstand and Ajay Menon and Ajay Sharma and Alex Boesenberg and Alexei Baevski and Allie Feinstein and Amanda Kallet and Amit Sangani and Amos Teo and Anam Yunus and Andrei Lupu and Andres Alvarado and Andrew Caples and Andrew Gu and Andrew Ho and Andrew Poulton and Andrew Ryan and Ankit Ramchandani and Annie Dong and Annie Franco and Anuj Goyal and Aparajita Saraf and Arkabandhu Chowdhury and Ashley Gabriel and Ashwin Bharambe and Assaf Eisenman and Azadeh Yazdan and Beau James and Ben Maurer and Benjamin Leonhardi and Bernie Huang and Beth Loyd and Beto De Paola and Bhargavi Paranjape and Bing Liu and Bo Wu and Boyu Ni and Braden Hancock and Bram Wasti and Brandon Spence and Brani Stojkovic and Brian Gamido and Britt Montalvo and Carl Parker and Carly Burton and Catalina Mejia and Ce Liu and Changhan Wang and Changkyu Kim and Chao Zhou and Chester Hu and Ching-Hsiang Chu and Chris Cai and Chris Tindal and Christoph Feichtenhofer and Cynthia Gao and Damon Civin and Dana Beaty and Daniel Kreymer and Daniel Li and David Adkins and David Xu and Davide Testuggine and Delia David and Devi Parikh and Diana Liskovich and Didem Foss and Dingkang Wang and Duc Le and Dustin Holland and Edward Dowling and Eissa Jamil and Elaine Montgomery and Eleonora Presani and Emily Hahn and Emily Wood and Eric-Tuan Le and Erik Brinkman and Esteban Arcaute and Evan Dunbar and Evan Smothers and Fei Sun and Felix Kreuk and Feng Tian and Filippos Kokkinos and Firat Ozgenel and Francesco Caggioni and Frank Kanayet and Frank Seide and Gabriela Medina Florez and 
Gabriella Schwarz and Gada Badeer and Georgia Swee and Gil Halpern and Grant Herman and Grigory Sizov and Guangyi and Zhang and Guna Lakshminarayanan and Hakan Inan and Hamid Shojanazeri and Han Zou and Hannah Wang and Hanwen Zha and Haroun Habeeb and Harrison Rudolph and Helen Suk and Henry Aspegren and Hunter Goldman and Hongyuan Zhan and Ibrahim Damlaj and Igor Molybog and Igor Tufanov and Ilias Leontiadis and Irina-Elena Veliche and Itai Gat and Jake Weissman and James Geboski and James Kohli and Janice Lam and Japhet Asher and Jean-Baptiste Gaya and Jeff Marcus and Jeff Tang and Jennifer Chan and Jenny Zhen and Jeremy Reizenstein and Jeremy Teboul and Jessica Zhong and Jian Jin and Jingyi Yang and Joe Cummings and Jon Carvill and Jon Shepard and Jonathan McPhie and Jonathan Torres and Josh Ginsburg and Junjie Wang and Kai Wu and Kam Hou U and Karan Saxena and Kartikay Khandelwal and Katayoun Zand and Kathy Matosich and Kaushik Veeraraghavan and Kelly Michelena and Keqian Li and Kiran Jagadeesh and Kun Huang and Kunal Chawla and Kyle Huang and Lailin Chen and Lakshya Garg and Lavender A and Leandro Silva and Lee Bell and Lei Zhang and Liangpeng Guo and Licheng Yu and Liron Moshkovich and Luca Wehrstedt and Madian Khabsa and Manav Avalani and Manish Bhatt and Martynas Mankus and Matan Hasson and Matthew Lennie and Matthias Reso and Maxim Groshev and Maxim Naumov and Maya Lathi and Meghan Keneally and Miao Liu and Michael L. 
Seltzer and Michal Valko and Michelle Restrepo and Mihir Patel and Mik Vyatskov and Mikayel Samvelyan and Mike Clark and Mike Macey and Mike Wang and Miquel Jubert Hermoso and Mo Metanat and Mohammad Rastegari and Munish Bansal and Nandhini Santhanam and Natascha Parks and Natasha White and Navyata Bawa and Nayan Singhal and Nick Egebo and Nicolas Usunier and Nikhil Mehta and Nikolay Pavlovich Laptev and Ning Dong and Norman Cheng and Oleg Chernoguz and Olivia Hart and Omkar Salpekar and Ozlem Kalinli and Parkin Kent and Parth Parekh and Paul Saab and Pavan Balaji and Pedro Rittner and Philip Bontrager and Pierre Roux and Piotr Dollar and Polina Zvyagina and Prashant Ratanchandani and Pritish Yuvraj and Qian Liang and Rachad Alao and Rachel Rodriguez and Rafi Ayub and Raghotham Murthy and Raghu Nayani and Rahul Mitra and Rangaprabhu Parthasarathy and Raymond Li and Rebekkah Hogan and Robin Battey and Rocky Wang and Russ Howes and Ruty Rinott and Sachin Mehta and Sachin Siby and Sai Jayesh Bondu and Samyak Datta and Sara Chugh and Sara Hunt and Sargun Dhillon and Sasha Sidorov and Satadru Pan and Saurabh Mahajan and Saurabh Verma and Seiji Yamamoto and Sharadh Ramaswamy and Shaun Lindsay and Shaun Lindsay and Sheng Feng and Shenghao Lin and Shengxin Cindy Zha and Shishir Patil and Shiva Shankar and Shuqiang Zhang and Shuqiang Zhang and Sinong Wang and Sneha Agarwal and Soji Sajuyigbe and Soumith Chintala and Stephanie Max and Stephen Chen and Steve Kehoe and Steve Satterfield and Sudarshan Govindaprasad and Sumit Gupta and Summer Deng and Sungmin Cho and Sunny Virk and Suraj Subramanian and Sy Choudhury and Sydney Goldman and Tal Remez and Tamar Glaser and Tamara Best and Thilo Koehler and Thomas Robinson and Tianhe Li and Tianjun Zhang and Tim Matthews and Timothy Chou and Tzook Shaked and Varun Vontimitta and Victoria Ajayi and Victoria Montanez and Vijai Mohan and Vinay Satish Kumar and Vishal Mangla and Vlad Ionescu and Vlad Poenaru and Vlad Tiberiu Mihailescu 
and Vladimir Ivanov and Wei Li and Wenchen Wang and Wenwen Jiang and Wes Bouaziz and Will Constable and Xiaocheng Tang and Xiaojian Wu and Xiaolan Wang and Xilun Wu and Xinbo Gao and Yaniv Kleinman and Yanjun Chen and Ye Hu and Ye Jia and Ye Qi and Yenda Li and Yilin Zhang and Ying Zhang and Yossi Adi and Youngjin Nam and Yu and Wang and Yu Zhao and Yuchen Hao and Yundi Qian and Yunlu Li and Yuzi He and Zach Rait and Zachary DeVito and Zef Rosnbrick and Zhaoduo Wen and Zhenyu Yang and Zhiwei Zhao and Zhiyu Ma},
356
+ year={2024},
357
+ eprint={2407.21783},
358
+ archivePrefix={arXiv},
359
+ primaryClass={cs.AI},
360
+ url={https://arxiv.org/abs/2407.21783},
361
+ }
362
+ @misc{deepseekai2024deepseekv3technicalreport,
363
+ title={DeepSeek-V3 Technical Report},
364
+ author={DeepSeek-AI and Aixin Liu and Bei Feng and Bing Xue and Bingxuan Wang and Bochao Wu and Chengda Lu and Chenggang Zhao and Chengqi Deng and Chenyu Zhang and Chong Ruan and Damai Dai and Daya Guo and Dejian Yang and Deli Chen and Dongjie Ji and Erhang Li and Fangyun Lin and Fucong Dai and Fuli Luo and Guangbo Hao and Guanting Chen and Guowei Li and H. Zhang and Han Bao and Hanwei Xu and Haocheng Wang and Haowei Zhang and Honghui Ding and Huajian Xin and Huazuo Gao and Hui Li and Hui Qu and J. L. Cai and Jian Liang and Jianzhong Guo and Jiaqi Ni and Jiashi Li and Jiawei Wang and Jin Chen and Jingchang Chen and Jingyang Yuan and Junjie Qiu and Junlong Li and Junxiao Song and Kai Dong and Kai Hu and Kaige Gao and Kang Guan and Kexin Huang and Kuai Yu and Lean Wang and Lecong Zhang and Lei Xu and Leyi Xia and Liang Zhao and Litong Wang and Liyue Zhang and Meng Li and Miaojun Wang and Mingchuan Zhang and Minghua Zhang and Minghui Tang and Mingming Li and Ning Tian and Panpan Huang and Peiyi Wang and Peng Zhang and Qiancheng Wang and Qihao Zhu and Qinyu Chen and Qiushi Du and R. J. Chen and R. L. Jin and Ruiqi Ge and Ruisong Zhang and Ruizhe Pan and Runji Wang and Runxin Xu and Ruoyu Zhang and Ruyi Chen and S. S. Li and Shanghao Lu and Shangyan Zhou and Shanhuang Chen and Shaoqing Wu and Shengfeng Ye and Shengfeng Ye and Shirong Ma and Shiyu Wang and Shuang Zhou and Shuiping Yu and Shunfeng Zhou and Shuting Pan and T. Wang and Tao Yun and Tian Pei and Tianyu Sun and W. L. Xiao and Wangding Zeng and Wanjia Zhao and Wei An and Wen Liu and Wenfeng Liang and Wenjun Gao and Wenqin Yu and Wentao Zhang and X. Q. 
Li and Xiangyue Jin and Xianzu Wang and Xiao Bi and Xiaodong Liu and Xiaohan Wang and Xiaojin Shen and Xiaokang Chen and Xiaokang Zhang and Xiaosha Chen and Xiaotao Nie and Xiaowen Sun and Xiaoxiang Wang and Xin Cheng and Xin Liu and Xin Xie and Xingchao Liu and Xingkai Yu and Xinnan Song and Xinxia Shan and Xinyi Zhou and Xinyu Yang and Xinyuan Li and Xuecheng Su and Xuheng Lin and Y. K. Li and Y. Q. Wang and Y. X. Wei and Y. X. Zhu and Yang Zhang and Yanhong Xu and Yanhong Xu and Yanping Huang and Yao Li and Yao Zhao and Yaofeng Sun and Yaohui Li and Yaohui Wang and Yi Yu and Yi Zheng and Yichao Zhang and Yifan Shi and Yiliang Xiong and Ying He and Ying Tang and Yishi Piao and Yisong Wang and Yixuan Tan and Yiyang Ma and Yiyuan Liu and Yongqiang Guo and Yu Wu and Yuan Ou and Yuchen Zhu and Yuduan Wang and Yue Gong and Yuheng Zou and Yujia He and Yukun Zha and Yunfan Xiong and Yunxian Ma and Yuting Yan and Yuxiang Luo and Yuxiang You and Yuxuan Liu and Yuyang Zhou and Z. F. Wu and Z. Z. Ren and Zehui Ren and Zhangli Sha and Zhe Fu and Zhean Xu and Zhen Huang and Zhen Zhang and Zhenda Xie and Zhengyan Zhang and Zhewen Hao and Zhibin Gou and Zhicheng Ma and Zhigang Yan and Zhihong Shao and Zhipeng Xu and Zhiyu Wu and Zhongyu Zhang and Zhuoshu Li and Zihui Gu and Zijia Zhu and Zijun Liu and Zilin Li and Ziwei Xie and Ziyang Song and Ziyi Gao and Zizheng Pan},
365
+ year={2024},
366
+ eprint={2412.19437},
367
+ archivePrefix={arXiv},
368
+ primaryClass={cs.CL},
369
+ url={https://arxiv.org/abs/2412.19437},
370
  }
src/index.html CHANGED
@@ -7,15 +7,15 @@
7
  <meta name="viewport" content="width=device-width, initial-scale=1">
8
  <meta charset="utf8">
9
  <base target="_blank">
10
- <title>FineWeb: decanting the web for the finest text data at scale</title>
11
  <link rel="stylesheet" href="style.css">
12
  </head>
13
 
14
  <body>
15
  <d-front-matter>
16
  <script id='distill-front-matter' type="text/json">{
17
- "title": "🔭 Ultra-Guide to Scaling LLM training",
18
- "description": "This blog covers everything about scaling LLMs in 2024.",
19
  "published": "Sept 28, 2024",
20
  "affiliation": {"name": "HuggingFace"},
21
  "authors": [
@@ -37,7 +37,7 @@
37
  </script>
38
  </d-front-matter>
39
  <d-title>
40
- <h1 class="l-page" style="text-align: center;">🔭 Ultra-Guide to Scaling LLM training</h1>
41
  <div id="title-plot" class="main-plot-container l-screen">
42
  <figure>
43
  <img src="assets/images/banner.png" alt="FineWeb">
@@ -51,21 +51,22 @@
51
  <d-article>
52
  <d-contents>
53
  </d-contents>
 
 
54
 
55
- <p>The performance of a large language model (LLM) depends heavily on the quality and size of the LLMs.
56
- However, the pretraining datasets for state-of-the-art open LLMs like Llama 3<d-cite
57
- bibtex-key="llama3modelcard"></d-cite> and Mixtral<d-cite bibtex-key="jiang2024mixtral"></d-cite> are
58
- not publicly available and very little is known about how they were created.</p>
59
  <aside>Reading time: 7 days. For the best reading experience, we recommend not using a mobile phone.</aside>
60
 
61
- <p>Recently, we released <a href="https://huggingface.co/datasets/HuggingFaceFW/fineweb"><strong>🍷
62
- FineWeb</strong></a>, a new, large-scale
63
- (<strong>15-trillion tokens, 44TB disk space</strong>) dataset for LLM pretraining. FineWeb is derived from
64
- 96 <a href="https://commoncrawl.org/">CommonCrawl</a> snapshots and produces <strong>better-performing LLMs
65
- than other open pretraining datasets</strong>.
66
 
67
  <aside>We are extremely thankful to the whole <a href="https://distill.pub/">distill.pub</a> team for creating
68
  the template on which we based this blog post.</aside>
 
 
 
 
 
69
  <div id="graph"></div>
70
  <div id="controls">
71
  <div class="cell column-1">
@@ -168,972 +169,50 @@
168
  </div>
169
  </div>
170
 
171
- <p><strong>TLDR:</strong> This blog covers a discussion on processing and evaluating data quality at scale, the
172
- 🍷 FineWeb
173
- recipe (listing and explaining all of our design choices), and the process followed to create its 📚
174
- FineWeb-Edu subset.</p>
175
-
176
- <h2>Scaling Models and Hardware</h2>
177
-
178
- <p>Now that we know the basics of distributed communication and computations it's time to apply this to training
179
- LLMs at scale. Here's the plan of action: we'll go through increasingly complex distribution strategies,
180
- namely data, then tensor and finally pipeline parallelism, and show three things:</p>
181
-
182
- <ol>
183
- <li>conceptual explanations with diagrams</li>
184
- <li>a minimal coding example illustrating how to implement said strategy</li>
185
- <li>scaling experiments show casing strengths and limits of the method with real data</li>
186
- </ol>
187
-
188
- <p>For the experiments we scale across two dimensions: we make the models larger and larger and add more and
189
- more compute nodes and measure how throughput changes.</p>
190
-
191
- <p>So this is a good point to get ☕ #2 and we'll have a look at the setup for the practical experiments.</p>
192
-
193
- <h2>Experiment setup</h2>
194
-
195
- <table>
196
- <thead>
197
- <tr>
198
- <th></th>
199
- <th><strong>1B (1)</strong></th>
200
- <th><strong>7B</strong></th>
201
- <th><strong>70B</strong></th>
202
- <th><strong>340B (2)</strong></th>
203
- <th><strong>400B (3)</strong></th>
204
- </tr>
205
- </thead>
206
- <tbody>
207
- <tr>
208
- <td><strong>N Layers</strong></td>
209
- <td>24</td>
210
- <td>32</td>
211
- <td>80</td>
212
- <td>96</td>
213
- <td>126</td>
214
- </tr>
215
- <tr>
216
- <td><strong>N Heads</strong></td>
217
- <td>32</td>
218
- <td>32</td>
219
- <td>64</td>
220
- <td>96</td>
221
- <td>128</td>
222
- </tr>
223
- <tr>
224
- <td><strong>Dimension</strong></td>
225
- <td>2048</td>
226
- <td>4096</td>
227
- <td>8192</td>
228
- <td>18432</td>
229
- <td>16384</td>
230
- </tr>
231
- </tbody>
232
- </table>
233
-
234
- <p>(1) FineWeb ablation models</p>
235
- <p>(2) Nemotron-340B architecture (without GQA)</p>
236
- <p>(3) Llama-400B, ffn dim = 1.2 hidden dim (without GQA)</p>
237
-
238
-
239
- <h2>Distribution Methods</h2>
240
-
241
- <p>Efficiently training LLMs now requires amounts of compute which exceed in most case single GPUs or machine.
242
- Large distributed clusters are thus used to train these models and can range from hundreds to thousands of
243
- nodes each usually equipped with up to 8 GPUs. To make the best use of such an expensive hardware, a range
244
- of distributed training methods have been developed with the goal of ensuring that GPUs are highly utilized
245
- at all times and not waiting for data/synchronization/etc.</p>
246
-
247
- <p>Several methods can be used to distribute training and we'll start with 4D parallelism followed-up by
248
- DeepSpeed stages. While we explain these strategies we'll also run experiments to determine the trade-offs
249
- and understand the optimal settings.</p>
250
- <p>The name "4D parallelism" originates from the fact that it involves combining up to 4 distribution methods:
251
- data, tensor, pipeline, and sequence parallelism (each of these techniques can be used independently of the
252
- other). You may thus ask "So which one should I use?".</p>
253
-
254
- <p>Unfortunately, there is no universal answer as the response will actually depend on the cluster setup as well
255
- as the model architecture. But do not despair for in this section we'll develop strategies to figure out the
256
- best setting experimentally!</p>
257
-
258
- <p>In addition to 4D parallelism we'll also take a look at "DeepSpeed", a method developed by Microsoft which is
259
- generally complimentary to 4D parallelism and can be leveraged on top of it.</p>
260
-
261
- <p><strong>Idea: show two things in every section</strong></p>
262
- <ol>
263
- <li>a small toy model (e.g. 4 layer FFN) we can interactively show with every approach</li>
264
- <li>a benchmark showing the improvement/limits of the approach (e.g. when you cross 1 node with TP)</li>
265
- </ol>
266
-
267
- <h3>No Parallelism</h3>
268
-
269
- <p>Let's quickly go over the basics before going into distributed training. When a model is trained on a single
270
- GPU, the training consists of 3 steps in the simplest case:</p>
271
- <ol>
272
- <li>one forward pass,</li>
273
- <li>one backward pass to compute the gradients, and</li>
274
- <li>an optimization step using the gradients to update the parameters</li>
275
- </ol>
276
-
277
- <p>As we'll see in the future, these steps may be repeated or intertwined but for now we'll start simple:</p>
278
- <p>As we'll see in the future, these steps may be repeated or intertwined but for now we'll start simple:</p>
279
-
280
- <img src="assets/images/IMG_7537D08D7F41-1.jpeg" alt="Training Steps">
281
-
282
- <p>In this figure the successive blue boxes on the top line can be seen as successive layers inside a model
283
- (same for the last line). The red boxes are the associated gradients for each of these layers.</p>
284
-
285
- <p>The batch size (<em>bs</em>) is one of the most important hyper-parameters in machine learning, affecting
286
- both model convergence and throughput.</p>
287
-
288
- <p>If the batch size is too small, gradients will tend to be noisy and the model may not be able to converge to
289
- optimal performances while a batch size too large can make the convergence of the model slower and waste
290
- compute. You can find a nice discussion of this topic in OpenAI's paper on large batch training (<a
291
- href="https://arxiv.org/abs/1812.06162">https://arxiv.org/pdf/1812.06162</a>).</p>
292
-
293
- <p>The batch size also affects the throughput: a small batch size will require more optimizer steps to train on
294
- a given amount of samples. Optimizer steps are costly (in compute time) and the throughput will thus be
295
- lower than when using a larger batch size. On the other hand, larger batches, while leading to higher
296
- throughput may suffer from slow convergence in the limits as we've just seen. There is generally an optimal
297
- batch size from a convergence/performance point of view (note that the batch size can usually still be
298
- changed around the optimal batch size without major impact to the performance of the model).</p>
299
-
300
- <p>Note that in the LLM community, batch sizes are commonly reported in terms of tokens instead of number of
301
- samples (BST - Batch Size Tokens) as each token has a label and thus a loss term and can thus be considered
302
- individual (although highly correlated) samples.</p>
303
-
304
- <p>A sweet spot for LLM training is usually on the order of 4-20 million tokens per batch (links GPT-3,
305
- DeepSeek, Llama). In the simplest case, training on a single machine, the <em>BS</em> and <em>BST</em> can
306
- be computed from the model input sequence length as follows:</p>
307
-
308
- <d-math>
309
- bst=bs *seq
310
- </d-math>
311
-
312
- <p>(note that from here on forward we'll show the formulas for the batch size in number of samples but you can
313
- always get its token-unit counterpart by multiplying it with the sequence length)</p>
314
-
315
- <p>And we're now hitting our first scaling problem:</p>
316
-
317
- <blockquote>
318
- <p>what if we can't fit the model into GPU memory even with <code>BS=1</code>?</p>
319
- </blockquote>
320
-
321
- <p>Good question, reader!</p>
322
-
323
- <p>Let's start by understanding what led to our out-of-memory issue in the first place.</p>
324
-
325
- <h2>A brief overview of memory usage in Transformers</h2>
326
-
327
- <p>To train a neural network model, one needs to store many elements in memory besides the weights themselves.
328
- Generally, the memory usage is made up from the following elements:</p>
329
- <ul>
330
- <li>model weights</li>
331
- <li>model gradients</li>
332
- <li>optimizer states</li>
333
- <li>activations computed during the forward pass and which are needed to compute the backward pass</li>
334
- <li>also CUDA Kernels require 1-2GB of GPU memory which you can quickly check yourself by running
335
- <code>import torch; torch.ones((1, 1)).to("cuda")</code> and then checking the GPU memory with
336
- <code>nvidia-smi</code>
337
- </li>
338
- <li>lower rest memory usage from buffers, intermediate results and some memory that can't be used due to
339
- fragmentation</li>
340
- </ul>
341
-
342
- <p>Scaling up training is usually a question of playing with those constituents to keep memory low while not
343
- impacting performance too much. We'll neglect the last two contributors as there's usually not that much you
344
- can do about them unless you dive deep in the code.</p>
345
-
346
- <p>For the rest, they are usually different types of tensors that can have various sizes (usually multiples of
347
- one or several of batch size, sequence length, model hidden dimension and some potential sharding) and
348
- various precisions (with optimizer states and weights copy being often kept in full FP32 precision while
349
- activations can be of lower precision like BF16 or FP8). Let's try to get some intuition for the memory
350
- requirement of these various elements.</p>
351
-
352
- <p>Let's first look at the weights, gradients and optimizer states. They are all dependent on the number of
353
- parameters in a model. For a simple LLM the number of parameters is given by the following formula:</p>
354
-
355
- <d-math>
356
- N = h*v + L * (12 * h^2 + 13*h) + 2*h
357
- </d-math>
358
-
359
- <p>In that equation, <em>h</em> corresponds to the hidden dimension, <em>v</em> to the vocabulary size, and
360
- <em>L</em> the number of layers in the model. Note that looking at the equation we can see that the term
361
- that will dominate at large model scales is the one with <em>h^2</em> since it's the only term growing
362
- quadratically as we scale the models.
363
- </p>
364
-
365
- <p>Let's see how the number of parameters translates to memory usage. The memory requirements for the parameters
366
- and gradients are the number of parameters multiplied by the number of bytes per parameter. Mixed precision
367
- training with BF16 is the default nowadays which requires 2 bytes per parameter. In addition, there are a
368
- number of values necessary for the optimizer states: for ADAM it requires the momentum and the variance in
369
- FP32, each using 4 bytes, and an additional copy of the model weights in FP32, thus 12 bytes per parameter
370
- (ref: <a href="https://arxiv.org/pdf/1910.02054">ZeRO</a>):</p>
371
-
372
- <d-math>
373
- m_{params} = 2 * N
374
- m_{grad} = 2 * N
375
- m_{opt} = (4+4+4) * N
376
- </d-math>
377
-
378
- <p>In old-fashioned full precision training both parameters and gradients would require 4 bytes each but the
379
- optimizer on the other hand wouldn't need to store an extra full precision copy of the weights:</p>
380
 
381
- <d-math>
382
- m_{params} = 4 * N
383
- m_{grad} = 4 * N
384
- m_{opt} = (4+4) * N
385
- </d-math>
386
 
387
- <p>So we can easily see that mixed precision itself doesn't save memory as it just distributes the memory
388
- differently across the three components. So by multiplying the number of parameters by 16 (=2+2+12) you can
389
- quickly get a sense of how much GPU memory we need for a model:</p>
390
- <p>So we can easily see that mixed precision itself doesn't save memory as it just distributes the memory
391
- differently across the three components. So by multiplying the number of parameters by 16 (=2+2+12) you can
392
- quickly get a sense of how much GPU memory we need for a model:</p>
393
 
394
- <table>
395
- <thead>
396
- <tr>
397
- <th>Model parameters</th>
398
- <th>Memory requirements</th>
399
- </tr>
400
- </thead>
401
- <tbody>
402
- <tr>
403
- <td>1B</td>
404
- <td>16 GB</td>
405
- </tr>
406
- <tr>
407
- <td>7B</td>
408
- <td>112 GB</td>
409
- </tr>
410
- <tr>
411
- <td>70B</td>
412
- <td>1120 GB</td>
413
- </tr>
414
- <tr>
415
- <td>405B</td>
416
- <td>6480 GB</td>
417
- </tr>
418
- </tbody>
419
- </table>
420
 
421
- <p>We can further decrease the memory usage if we choose FP8 training instead of BF16 but it is much less stable
422
- and a very active research topic (see <a href="https://x.com/xariusrke/status/1826669126955278401">here</a>)
423
- thus we won't go in details here.</p>
424
 
425
- <p>But we are not done yet, we'll also need to store the forward pass activations which are used during the
426
- backward pass to compute the gradients. The total memory required for the activations in mixed precision
427
- (which contributes the leading factor of 2 below) is given by the following equation:</p>
428
 
429
- <d-math>
430
- m_{act} = 2 * L* seq * bs * h * (34 + \frac{5*n_{heads}*seq}{h})
431
- </d-math>
432
-
433
- <p>You can follow <a href="https://arxiv.org/pdf/2205.05198">this NVIDIA paper</a> for a complete derivation, it
434
- essentially requires you to do some accounting of all the sizes of intermediate activations between each
435
- operation. What's interesting here is that the memory is not static for a given model but depends critically
436
- on the sequence length. We can use the memory formulas and have a look how the memory usage changes for a
437
- model for various sequence lengths:</p>
438
-
439
- <img src="assets/images/image%206.png" alt="Memory Usage Graph 1">
440
- <img src="assets/images/image%207.png" alt="Memory Usage Graph 2">
441
-
442
- <p>This graph tells a striking story: for short sequences, activations are almost negligible, but starting at
443
- around 2-4k tokens they start to take up a significant amount of memory while parameter, gradient and
444
- optimizer state are roughly independent of the sequence length and batch size. For large batch/sequence,
445
- activations however become by far the largest memory burden.</p>
446
-
447
- <p>Is there a way to tame this "activation explosion"?</p>
448
-
449
- <p>Good question, reader! I see you're following well and you're lucky as the answer is "Yes"! Let's talk about
450
- a technique called <strong>gradient checkpointing</strong> or more frequently <strong>activation
451
- recomputation</strong> which can help us cap activation memory footprint and is an essential tool in
452
- today's large model training toolbox.</p>
453
-
454
- <h3>Activation recomputation</h3>
455
-
456
- <p>The general idea behind gradient checkpointing is to discard some activations to save memory if we are
457
- willing to spend some extra compute to recompute them when needed. Typically we will save activations at
458
- some key points in memory and discard the rest and recompute them during the backward pass from the nearest
459
- activations:</p>
460
-
461
- <img src="assets/images/IMG_C4260C5C58DC-1.jpeg" alt="Activation Recompute">
462
-
463
- <p>We can select these key activations according to several strategies and modern frameworks usually choose
464
- among the following three strategies:</p>
465
- <ul>
466
- <li><strong>None</strong>: We don't recompute activations during the backward pass and keep all activations
467
- in memory. While this is the fastest and thus computationally cheapest option, it also requires the most
468
- memory.</li>
469
- <li><strong>Full</strong>: The simplest strategy from a conceptual point of view is to checkpoint
470
- activations between each Transformer layer. This is usually called the <code>full</code> strategy since
471
- it requires a forward pass through each layer essentially adding a full forward pass during the backward
472
- pass. This strategy saves the most memory but is the most expensive one in terms of compute. This
473
- increases the compute cost by up to 30-40% which is very noticeable.</li>
474
- <li><strong>Selective</strong>: In general we can do better than full. The authors of <a
475
- href="https://arxiv.org/pdf/2205.05198">this paper</a> did a detailed analysis studying which
476
- activations grow the largest and have the cheapest recomputation cost in terms of FLOPs. Turns out that
477
- the attention computations fall in that category, and thus we can usually discard them and focus on
478
- checkpointing expensive feedforward computations. Note: for a GPT-3 (175B) model this means 70%
479
- activation memory reduction at a 2.7% compute cost.</li>
480
- </ul>
481
-
482
- <p>Let's see how recomputation strategies can drastically reduce the memory footprint while selective
483
- recomputation strikes a nice balance between memory saving and recomputation cost:</p>
484
- <p>Let's see how recomputation strategies can drastically reduce the memory footprint while selective
485
- recomputation strikes a nice balance between memory saving and recomputation cost:</p>
486
-
487
- <img src="assets/images/image%208.png" alt="Recomputation Strategies">
488
-
489
- <p>Note: Hardware vs Model flops.</p>
490
-
491
- <p>Most frameworks these days use FlashAttention (TODO: see later) which makes the attention computation less
492
- memory intensive through kernel fusion, thus most trainings use the <code>full</code> settings.</p>
493
-
494
- <p>We can save some GPU memory with activation recomputation but this only delays by a bit the next bottleneck:
495
- as hinted earlier for LLM training there is usually a sweet spot for the GBST and we need to work out the
496
- training configuration backward from there. However, you can't choose MBS to be an arbitrary large number on
497
- your GPU; at some point you will run out of GPU memory again since you need to store at least some of the
498
- activations in memory.</p>
499
-
500
- <p>There is a useful trick to compensate for that: <strong>gradient accumulation</strong> (<em>GradAcc</em>).
501
- With gradient accumulation we will split our batch in micro-batch, do forward and backward passes repeatedly
502
- on each micro-batch, compute the gradients, and, as the name suggests, sum the gradients step by step before
503
- doing a final optimizer step.</p>
504
-
505
- <p>We call the <code>micro batch size</code> (MBS) the batch size for each forward pass on a single node (the
506
- number of samples flowing through the model in one forward pass). We'll refer to the overall batch size
507
- between each optimizer step as the <code>global batch size</code> (GBS). If we do one optimizer step each 8
508
- forward/backward pass, the <code>global batch size</code> will be 8 times the <code>micro batch size</code>.
509
- </p>
510
-
511
- <p>What we now call <code>global batch size</code> thus corresponds to what we've called up to now just
512
- <code>batch size</code> for simplicity (we now make the terms more precise to avoid ambiguity).
513
- </p>
514
-
515
- <p>With gradient accumulation the global batch size can be computed as follows:</p>
516
-
517
- <d-math>
518
- BS = GBS=MBS * GradAcc
519
- </d-math>
520
-
521
- <p>Gradient accumulation allows us to effectively increase our batch size up to infinity (!) while the memory
522
- footprint stays constant. Gradient accumulation is also compatible with activation recomputation for further
523
- memory reduction. One drawback however, is that gradient accumulation requires multiple consecutive
524
- forward/backward passes per optimization step thereby increasing the compute overhead and slowing down
525
- training. No free lunch!</p>
526
-
527
- <img src="assets/images/IMG_DA188FF29F45-1.jpeg" alt="Gradient Accumulation">
528
-
529
- <p>This is actually a bummer since the forward/backward passes for each micro-batch could actually totally be
530
- run in parallel. They are independent from each other and the only changing parameter are the input samples.
531
- </p>
532
-
533
- <p>Here comes data parallelism to solve exactly this problem! Let's take a look, you say? Okay sure!</p>
534
-
535
- <h3>Data Parallelism</h3>
536
-
537
- <p>The idea behind data parallelism (DP) is to parallelize forward and backward passes across GPUs, passing
538
- different batches of data per GPU (or groups of GPUs) to the same model instance. Just like for gradient
539
- accumulation, we need to average gradients across instances before we do the optimization step. The GBS
540
- equation can then be extended to:</p>
541
-
542
- <d-math>
543
- GBS=MBS * GradAcc * DP
544
- </d-math>
545
-
546
- <p>This means that we can reduce the number of gradient accumulation steps in favor of data parallel processes
547
- which speeds up training. In practice, people will tend to max out the number of data parallel nodes (the DP
548
- above) as much as possible as it's inherently parallel versus the sequential Gradient Accumulation. Gradient
549
- accumulation is then added only to achieve a target batch size if DP alone is not sufficient. One exception
550
- to that is pipeline parallelism which we'll discuss later.</p>
551
-
552
- <img src="assets/images/IMG_A95961668B3F-1.jpeg" alt="Data Parallelism">
553
-
554
- <p>As you can see on the figure above, some gradients can already be gathered and summed (red boxes) while
555
- gradients down the line (red boxes on the left of the current gradient) are still being computed. This
556
- significantly speeds up data parallelism. For instance, as soon as the backward pass of the last layer is
557
- done (last boxes on the right) those gradients can already be gathered/summed while the backward pass
558
- computations move to earlier layers, aka to the left. This lowers the communication/bandwidth pressure to
559
- sync gradients of the full model as it can be performed in part in parallel to the computation of said
560
- gradients. See <a href="https://siboehm.com/articles/22/data-parallel-training">this article</a> for more
561
- information.</p>
562
-
563
- <p>A general recipe to determine an optimal data-parallel setup can be as follows:</p>
564
  <ol>
565
- <li>Determine the best (global) batch size in tokens to use either by consulting literature or running
566
- experiments. This determines the GBST.</li>
567
- <li>Select a sequence length for training, again by either consulting literature or running experiments.
568
- Generally 2-8k tokens works reliably well.</li>
569
- <li>You now know the batch size (GBS=GBST/SeqLen). Find the maximum MBS on a single GPU by increasing the
570
- local batch size until you run out of memory. This determines the MBS.</li>
571
- <li>Finally, the number of available GPUs corresponds to the potential DP. The ratio of GBS to DP determines
572
- the remaining number of gradient accumulation steps needed for the desired GBS.</li>
573
- </ol>
574
-
575
- <p>If the gradient accumulation ratio is lower than one, i.e. you have too many GPUs (!), you can either choose
576
- to not use all your GPUs or test if a lower MBS will speed up training. In these cases, you may want to
577
- prioritize throughput over the individual GPU utilization, you can then choose DP first and use a smaller
578
- MBS than possible in order to speed up training.</p>
579
-
580
- <p>Time to take a concrete example: We want to train a model with a GBS of 4M tokens and a sequence length of
581
- 4k. This means our batch size will be 1024 samples (we pick powers of two). We observe that a single one of our
582
- GPUs can fit MBS=2 in memory and we have 128 GPUs available for training. This means with 4 gradient
583
- accumulation steps we'll achieve our goal of 1024 samples or 4M tokens per training step. Now what if we
584
- suddenly have 1024 GPUs available? We can achieve the same GBS and thus identical training by setting both
585
- MBS and gradient accumulation to 1 speeding up training significantly.</p>
586
-
587
- <p>[EXPERIMENTS WHERE WE INCREASE DP AND SHOW THROUGHPUT FOR SEVERAL MODELS]</p>
588
-
589
- <p>We've explored data parallelism, a simple strategy to scale training across more GPUs and gives consistent
590
- speed improvements. The keen reader might have noticed however that it rests on the assumption that we can
591
- fit at least one input sample forward pass (<em>MBS=1</em>) into our GPU memory. This is not always the
592
- case! In particular for larger models which often don't fit into a single GPU anymore even with activation
593
- recomputations activated.</p>
594
-
595
- <p>In such case, we need to shard the model across devices! We'll now study two complementary sharding methods,
596
- tensor and pipeline parallelism which are doing that. Let's start by the simplest, tensor parallelism!</p>
597
-
598
- <h3>Tensor Parallelism</h3>
599
-
600
- <p>So you've exhausted all the previous textbook tricks to try to fit your model on a single GPU but it still
601
- doesn't fit? Let's try to distribute this model across several GPUs. Unlike DP we will not simply duplicate
602
- the model but various parts of the model instance will be living on various GPUs.</p>
603
-
604
- <p>If we take a look at a typical matrix multiplication (the core of a neural network), we can get an idea about
605
- how we could split the model:</p>
606
-
607
- <img src="assets/images/image%209.png" alt="Matrix Multiplication Example">
608
-
609
- <p>Tensor parallelism is a technique in which a tensor is split into N shards along a particular dimension
610
- across N GPUs. Matrices can be split either on the column part or row part leading to row and column
611
- parallelism. Depending on which splitting strategy we choose will require different communications
612
- primitives.</p>
613
-
614
- <p><strong>Column linear:</strong></p>
615
- <ul>
616
- <li>Splitting by column or row involves different synchronization primitives:
617
- <ul>
618
- <li>column:
619
- <ul>
620
- <li>A <strong>Broadcast</strong> operation is used to send the same input to different GPUs,
621
- </li>
622
- <li>Multiplications are done independently on the GPUs, and finally</li>
623
- <li>An <strong>All-gather</strong> operation is used to gather the output results.</li>
624
- </ul>
625
- </li>
626
- <li>Row:
627
- <ul>
628
- <li>A <strong>Scatter</strong> operation is used to split the input and send it to different
629
- GPUs (we split the weight row-wise),</li>
630
- <li>Multiplications are done independently on the GPUs, and finally</li>
631
- <li>An <strong>All-reduce</strong> operation is used to add the results together and the
632
- full output results.</li>
633
- </ul>
634
- </li>
635
- </ul>
636
- </li>
637
- </ul>
638
-
639
- <p>This was for an example matrix multiplication. How do we apply this in practice to a real model? In the
640
- Transformer, there are 2 basic building blocks where tensor parallel can be applied:</p>
641
- <ul>
642
- <li>Feedforward layers (MLP)</li>
643
- <li>Multi-Head Attention (MHA)</li>
644
  </ul>
645
-
646
- <p>Feedforward layers comprise 2 successive MLPs with a non-linearity in-between. Here is the first part of it:
647
- </p>
648
-
649
- <img src="assets/images/image%2012.png" alt="Feedforward Layers">
650
-
651
- <p>Should we use row or column parallelization for the first MLP?</p>
652
-
653
- <p>Well it turns out parallelized GeLU only works in Column schema:</p>
654
-
655
- <p>In column schema:</p>
656
- <d-math>
657
- GeLU(cat([XW1, XW2])) = cat([GeLU(XW1), GeLU(XW2)])
658
- </d-math>
659
-
660
- <p>In row schema:</p>
661
- <d-math>
662
- GeLU(XW1 + XW2) \neq GeLU(XW1) + GeLU(XW2)
663
- </d-math>
664
-
665
- <p>If you rather like code, note that we can prove this with the following snippet as well:</p>
666
-
667
- <d-code block language="python">
668
-
669
-
670
- def example_gelu():
671
- from torch.nn.functional import gelu
672
-
673
- X = torch.randn(4, 2, device="cuda", dtype=torch.float32)
674
- W = torch.randn(2, 2, device="cuda", dtype=torch.float32)
675
-
676
- W_0, W_1 = W.chunk(2, dim=1)
677
-
678
- # Column linear
679
- y_col_1 = torch.cat([gelu(X @ W_0), gelu(X @ W_1)], dim=1)
680
- y_col_2 = gelu(torch.cat([X @ W_0, X @ W_1], dim=1))
681
-
682
- # All match
683
- torch.testing.assert_close(y_col_1, y_col_2, rtol=1e-5, atol=1e-5)
684
-
685
- # Row linear
686
- X_0, X_1 = X.chunk(2, dim=1)
687
- W_0, W_1 = W.chunk(2, dim=0)
688
- y_row_1 = gelu(X_0 @ W_0) + gelu(X_1 @ W_1)
689
- y_row_2 = gelu(X_0 @ W_0 + X_1 @ W_1)
690
-
691
- # Mismatch
692
- torch.testing.assert_close(y_row_1, y_row_2, rtol=1e-5, atol=1e-5)
693
- </d-code>
694
-
695
- <p>To avoid a synchronization step directly after the first MLP, we'll thus start with Column Parallel and be
696
- able to directly perform parallel GELU.</p>
697
-
698
- <p>Now, what about the second MLP? Should it be column or row parallel? Let's draft both options:</p>
699
- <ul>
700
- <li>Column Parallel followed by Column Parallel</li>
701
- <img src="assets/images/image%2013.png" alt="Column Parallel Schema 1">
702
- <li>Column Parallel followed by Row Parallel</li>
703
- <img src="assets/images/image%2014.png" alt="Column Parallel Schema 2">
704
  </ul>
705
-
706
- <p>We see that the "Column Parallel followed by Row Parallel" schema only involves two communications instead of
707
- four. It's thus the most efficient schema in terms of communications.</p>
708
-
709
- <p>Let's take a quick look at the backward pass:</p>
710
- <img src="assets/images/image%2015.png" alt="Backward Pass 1">
711
- <img src="assets/images/image%2016.png" alt="Backward Pass 2">
712
-
713
- <d-code block language="python">
714
- def column_linear_forward(X, local_W, group):
715
- Y_local = X @ local_W.t()
716
- return Y_local
717
-
718
- def column_linear_backward(local_grad_Y, X, local_W, group):
719
- local_grad_X = local_grad_Y @ local_W
720
- grad_W = local_grad_Y.t() @ X
721
- return local_grad_X, grad_W
722
-
723
- def row_linear_forward(local_X, local_W, group):
724
- Y_local = local_X @ local_W.t()
725
- dist.all_reduce(Y_local, group=group)
726
- Y = Y_local
727
- return Y
728
-
729
- def row_linear_backward(grad_Y, X, local_W, group):
730
- local_grad_X = grad_Y @ local_W
731
- grad_W = grad_Y.t() @ X
732
- return local_grad_X, grad_W
733
-
734
- def example_column_row_linear():
735
- # torchrun --nproc_per_node=2 tp_all_reduce.py
736
- group = dist.distributed_c10d._get_default_group()
737
-
738
- X_ref = torch.arange(4 * 2, device="cuda", dtype=torch.float32, requires_grad=True).reshape(4, 2)
739
- W_ref_layer1 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2) * 10
740
- W_ref_layer2 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2)
741
-
742
- X_ref.retain_grad()
743
- W_ref_layer1.retain_grad()
744
- W_ref_layer2.retain_grad()
745
-
746
- dist.broadcast(X_ref, src=0, group=group)
747
- dist.broadcast(W_ref_layer1, src=0, group=group)
748
- dist.broadcast(W_ref_layer2, src=0, group=group)
749
-
750
- X = X_ref.clone()
751
- W_layer1 = W_ref_layer1.clone()
752
- W_layer2 = W_ref_layer2.clone()
753
-
754
- # Forward
755
- Y_ref_linear1 = X_ref @ W_ref_layer1.t()
756
- Y_ref_linear1.retain_grad()
757
-
758
- # We will transpose for matrix multiplication. As a result, we need to split row-wise
759
- Y_local_linear1 = column_linear_forward(X, split_tensor(W_layer1, dim=0), group)
760
-
761
- torch.testing.assert_close(Y_local_linear1, split_tensor(Y_ref_linear1, dim=1), rtol=1e-5, atol=1e-5)
762
-
763
- Y_local_linear2 = row_linear_forward(Y_local_linear1, split_tensor(W_ref_layer2, dim=1), group)
764
- Y_ref_linear2 = Y_ref_linear1 @ W_ref_layer2.t()
765
- torch.testing.assert_close(Y_local_linear2, Y_ref_linear2, rtol=1e-5, atol=1e-5)
766
-
767
- # Backward
768
- Y_ref_linear2.sum().backward()
769
-
770
- grad_Y = torch.ones_like(Y_ref_linear2)
771
- grad_X_linear2, grad_W_linear2 = row_linear_backward(grad_Y, Y_local_linear1, split_tensor(W_layer2, dim=1),
772
- group)
773
-
774
- torch.testing.assert_close(grad_X_linear2, split_tensor(Y_ref_linear1.grad, dim=1), rtol=1e-5, atol=1e-5)
775
- torch.testing.assert_close(grad_W_linear2, split_tensor(W_ref_layer2.grad, dim=1), rtol=1e-5, atol=1e-5)
776
-
777
- grad_X, grad_W = column_linear_backward(grad_X_linear2, X, split_tensor(W_layer1, dim=0), group)
778
-
779
- torch.testing.assert_close(grad_X, X_ref.grad, rtol=1e-5, atol=1e-5)
780
- torch.testing.assert_close(grad_W, split_tensor(W_ref_layer1.grad, dim=0), rtol=1e-5, atol=1e-5)
781
-
782
- if __name__ == "__main__":
783
- dist.init_process_group("nccl", rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
784
- torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
785
-
786
- example_column_row_linear()
787
- </d-code>
788
-
789
- <p>Now that we've found the most efficient schema for the Feedforward part of the transformer, let's take a look
790
- at the multi-head attention block (MHA).</p>
791
-
792
- <p>We can generally follow a similar approach where the Q, K, V will be split in a Column Parallel fashion and
793
- the output projection will be split along the Row dimension.</p>
794
-
795
- <img src="assets/images/image%2017.png" alt="Multi-Head Attention Block">
796
-
797
- <p>To dive in further particularities, a nice reference paper detailing TP is for instance <a
798
- href="https://arxiv.org/abs/1909.08053">Megatron-LM: Training Multi-Billion Parameter Language Models
799
- Using Model Parallelism</a>.</p>
800
-
801
- <p>Note: Sequence Parallel</p>
802
-
803
- <h3>Sequence Parallelism</h3>
804
-
805
- <p>Tensor parallelism has been a great help to parallelize some of our computation on several GPU nodes with the
806
- limited cost of a few communication operations.</p>
807
-
808
- <p>It also had the additional benefit of reducing memory usage by splitting intermediate activations inside the
809
- feedforward elements across GPUs and thereby reducing the activations to store on each node.</p>
810
-
811
- <p>Could we push this approach further?</p>
812
-
813
- <p>Sequence parallelism applies this same idea to other parts of our model. We've applied tensor parallelism to
814
- two main parts in our models where combination of MLP allowed to naturally split the weights along major
815
- axis.</p>
816
-
817
- <p>The rest of the model mostly comprises layer norms, dropout and various summation of residuals, these
818
- contribute little to the computation but come with rather large forward activations to store.</p>
819
-
820
- <p>[Add some illustration of the forward activations to store for each part]</p>
821
-
822
- <h3>Context Parallelism</h3>
823
-
824
- <p>Even though TP-SP mode helps reduce the memory used by activation values, it has two main drawbacks:</p>
825
- <ol>
826
- <li>Internode connections are usually slow, so the TP degree shouldn't typically exceed 8</li>
827
- <li>The TP degree is limited by the number of Key/Value heads, which is 8 for LLaMA 3 8B.</li>
828
- </ol>
829
-
830
- <p>An empirical estimation is that with TP=8, you can only train an 8B model with a 20K context length. However,
831
- LLaMA 3.1 has managed to scale the context length to 128K by using context parallelism.</p>
832
-
833
- <p>There are several ways to implement sequence parallelism. We used ring attention, which overlaps
834
- communication and computation. LLaMA3.1 uses all-gather along the sequence dimension because it is easier
835
- and more flexible to support different types of attention masks in all-gather based CP attention, such as
836
- the document mask.</p>
837
-
838
- <h3>Pipeline Parallelism</h3>
839
-
840
- <h3>Overlapping computation and communication</h3>
841
-
842
- <h3>ZeRO</h3>
843
-
844
- <h2>II – Architecture</h2>
845
-
846
- <h3>Transformers</h3>
847
-
848
- <h3>Choosing the right dimensions</h3>
849
-
850
- <h3>Positional Embeddings (Learned, RoPE, ALiBi)</h3>
851
-
852
- <h3>RoPE</h3>
853
-
854
- <p>In the transformer model, tokens have no inherent information about their positional information. For these
855
- reasons, we need to use a positional encoding function.</p>
856
-
857
- <p>Assuming that in the multi-head attention layer, <em>q_m</em> is the "position-aware" query vector
858
- corresponding to a token at position <em>m</em>, <em>k_n</em> the "position-aware" key vector corresponding
859
- to the token at position <em>n</em> and <em>f</em> is our position embedding function, we would like our
860
- position vector to be a function of the input vectors and absolute positions like this:</p>
861
-
862
- <d-math>
863
- q_m = f(q,m)
864
- k_n = f(k,n)
865
- </d-math>
866
-
867
- <p>We may also want the positional encoding to model relative positional information between two input tokens.
868
- Relative positions help the model to operate across longer context spans and even context lengths not seen
869
- during training. The attention operation is generally a dot product operation between "position-aware"
870
- vectors <em>q</em> and <em>k</em>, so for a positional encoding that contains relative positional
871
- information, we'll want to have:</p>
872
-
873
- <d-math>
874
- \langle q_m, k_n \rangle = g(q, k, m-n)
875
- </d-math>
876
-
877
- <p>In other words, we want the result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> to depend on the values of <em>q</em> and
878
- <em>k</em> themselves, as well as their relative position <em>m − n</em>, but not <em>m</em> and <em>n</em>.
879
- This way, the model can focus on the relative difference between two tokens rather than their absolute
880
- positions.
881
- </p>
882
-
883
- <p>Let's show that the RoPE positional embedding formulation satisfies the above formula.</p>
884
-
885
- <p><strong>Rotation matrix</strong></p>
886
-
887
- <p>RoPE are based on rotation matrices which have simple and interesting properties for us. In a 2D space, a
888
- rotation matrix has the following form:</p>
889
-
890
- <d-math>
891
- R(θ) =
892
- \begin{pmatrix}
893
- \cosθ & -\sinθ \\
894
- \sinθ & \cosθ
895
- \end{pmatrix}
896
- </d-math>
897
-
898
- <p>The rotation matrix has the following properties:</p>
899
- <ul>
900
- <li><em>R(θ)</em><sup>T</sup> = <em>R(-θ)</em></li>
901
- <li><em>R(θ<sub>1</sub>)R(θ<sub>2</sub>) = R(θ<sub>1</sub>+θ<sub>2</sub>)</em></li>
902
  </ul>
903
-
904
- <img src="assets/images/rotation.jpeg" alt="Rotation Matrix">
905
-
906
- <p><strong>RoPE in 2D space</strong></p>
907
-
908
- <p>Assuming <em>q</em> and <em>k</em> are 2D column vectors, we can show that:</p>
909
-
910
- <d-math>
911
- <R(θ_1)q, R(θ_2)k> = (R(θ_1)q)<sup>T</sup> (R(θ_2)k) = q<sup>T</sup>R(-θ_1)R(θ_2)k =
912
- q<sup>T</sup>R(θ_2-θ_1)k = (R(θ_1-θ_2)q)<sup>T</sup>k = <R(θ_1-θ_2)q,k>
913
- </d-math>
914
-
915
- <p>Therefore, if we define our position embedding like this: <em>f(x, m) = R(mθ)x</em> where <em>R</em> is a 2D
916
- rotation matrix, we have <em>q_m = R(mθ)q</em> and <em>k_n = R(nθ)k</em> and then:</p>
917
-
918
- <d-math>
919
- \langle q_m, k_n \rangle = \langle R(mθ)q, R(nθ)k \rangle = \langle R((m-n)θ)q, k \rangle
920
- </d-math>
921
-
922
- <p>We can see that a multiplication with a rotation matrix is exactly the positional encoding we were looking
923
- for. The result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> only depends on <em>q</em>, <em>k</em> and <em>m-n</em>.</p>
924
-
925
- <p><strong>Implementation</strong></p>
926
-
927
- <p>In our case, our internal vectors (the activations in our model) have much more than two elements. Let's pair
928
- elements to get 2D vectors and apply the 2D rotation operation on these pairs.</p>
929
-
930
- <p>There are combinatorially many ways we can pair elements but generally two options are the most popular for
931
- implementing RoPE: we call them the <em>interleaved</em> and <em>non-interleaved</em> versions. (It's still
932
- rather unfortunate to have two popular options)</p>
933
-
934
- <ol>
935
- <li>In the interleaved version, we pair consecutive elements <em>(x<sub>0</sub>,
936
- x<sub>1</sub>),(x<sub>2</sub>,x<sub>3</sub>),…</em> before applying the rotation matrix:</li>
937
- <d-math>
938
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
939
- x_0 \\
940
- x_1 \\
941
- x_2 \\
942
- x_3 \\
943
- \vdots \\
944
- x_{d-2} \\
945
- x_{d-1}
946
- \end{pmatrix}
947
- \odot
948
- \begin{pmatrix}
949
- \cos mθ_0 \\
950
- \cos mθ_0 \\
951
- \cos mθ_1 \\
952
- \cos mθ_1 \\
953
- \vdots \\
954
- \cos mθ_{d/2-1} \\
955
- \cos mθ_{d/2-1}
956
- \end{pmatrix}
957
- +
958
- \begin{pmatrix}
959
- -x_1 \\
960
- x_0 \\
961
- -x_3 \\
962
- x_2 \\
963
- \vdots \\
964
- -x_{d-1} \\
965
- x_{d-2}
966
- \end{pmatrix}
967
- \odot
968
- \begin{pmatrix}
969
- \sin mθ_0 \\
970
- \sin mθ_0 \\
971
- \sin mθ_1 \\
972
- \sin mθ_1 \\
973
- \vdots \\
974
- \sin mθ_{d/2-1} \\
975
- \sin mθ_{d/2-1}
976
- \end{pmatrix}
977
- </d-math>
978
- <d-math>
979
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
980
- x_0\cos mθ_0 - x_1\sin mθ_0 \\
981
- x_1\cos mθ_0 + x_0\sin mθ_0 \\
982
- x_2\cos mθ_1 - x_3\sin mθ_1 \\
983
- x_3\cos mθ_1 + x_2\sin mθ_1 \\
984
- \vdots \\
985
- x_{d-2}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
986
- x_{d-1}\cos mθ_{d/2-1} + x_{d-2}\sin mθ_{d/2-1}
987
- \end{pmatrix}
988
- </d-math>
989
- <li>In the non-interleaved version, we split the vector in two to pair elements as follows:
990
- <em>(x<sub>0</sub>, x<sub>d/2</sub>),(x<sub>1</sub>,x<sub>d/2+1</sub>),…</em> This is the implementation
991
- used in the <code>transformers</code> library:
992
- </li>
993
- <d-math>
994
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
995
- x_0 \\
996
- x_1 \\
997
- \vdots \\
998
- x_{d/2-1} \\
999
- x_{d/2} \\
1000
- x_{d/2+1} \\
1001
- \vdots \\
1002
- x_{d-1}
1003
- \end{pmatrix}
1004
- \odot
1005
- \begin{pmatrix}
1006
- \cos mθ_0 \\
1007
- \cos mθ_1 \\
1008
- \vdots \\
1009
- \cos mθ_{d/2-1} \\
1010
- \cos mθ_{0} \\
1011
- \cos mθ_{1} \\
1012
- \vdots \\
1013
- \cos mθ_{d/2-1}
1014
- \end{pmatrix}
1015
- +
1016
- \begin{pmatrix}
1017
- -x_{d/2} \\
1018
- -x_{d/2+1} \\
1019
- \vdots \\
1020
- -x_{d-1} \\
1021
- x_{0} \\
1022
- x_{1} \\
1023
- \vdots \\
1024
- x_{d/2-1}
1025
- \end{pmatrix}
1026
- \odot
1027
- \begin{pmatrix}
1028
- \sin mθ_0 \\
1029
- \sin mθ_1 \\
1030
- \vdots \\
1031
- \sin mθ_{d/2-1} \\
1032
- \sin mθ_{0} \\
1033
- \sin mθ_{1} \\
1034
- \vdots \\
1035
- \sin mθ_{d/2-1}
1036
- \end{pmatrix}
1037
- </d-math>
1038
- <d-math>
1039
- R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
1040
- x_0\cos mθ_0 - x_{d/2}\sin mθ_0 \\
1041
- x_1\cos mθ_1 - x_{d/2+1}\sin mθ_1 \\
1042
- \vdots \\
1043
- x_{d/2-1}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
1044
- x_{d/2}\cos mθ_0 + x_0\sin mθ_0 \\
1045
- x_{d/2+1}\cos mθ_1 + x_1\sin mθ_1 \\
1046
- \vdots \\
1047
- x_{d-1}\cos mθ_{d/2-1} + x_{d/2-1}\sin mθ_{d/2-1} \\
1048
- \end{pmatrix}
1049
- </d-math>
1050
- <p>The angle of rotation, <em>θ<sub>i</sub></em> is defined as follows, where <em>d</em> is the dimension of
1051
- the attention head:</p>
1052
- <d-math>
1053
- θ_i = base^{-2(i-1)/d}, i \in [1,2,...,d/2]
1054
- </d-math>
1055
- <p>How does this look? When moving the same distance, vectors in some dimensions rotate faster than vectors
1056
- in other dimensions.</p>
1057
- <img src="assets/images/rotation_speed.jpeg" alt="Rotation Speed">
1058
  </ol>
 
 
 
 
 
1059
 
1060
- <h3>Attention (MHA, MQA, GQA)</h3>
1061
-
1062
- <h2>Optimized Operations</h2>
1063
-
1064
- <h3>Flash Attention 1&2&3</h3>
1065
-
1066
- <h3>Fused Kernels</h3>
1067
-
1068
- <h2>III – Training Recipe</h2>
1069
-
1070
- <h3>Batch Size</h3>
1071
-
1072
- <h3>Initialization + rescaling activations inside the model</h3>
1073
-
1074
- <h3>Numerical Precision</h3>
1075
-
1076
- <h4>FP16/BF16/FP8</h4>
1077
-
1078
- <p>@Phuc Nguyen?</p>
1079
-
1080
- <h3>Long Context Training</h3>
1081
-
1082
- <h3>Evaluation</h3>
1083
-
1084
- <p>@Haojun Zhao</p>
1085
-
1086
- <h3>Infini-Attention</h3>
1087
-
1088
- <p>@Phuc Nguyen</p>
1089
-
1090
- <h3>Ring Attention</h3>
1091
-
1092
- <p>@Haojun Zhao</p>
1093
-
1094
- <h3>RoPE scaling / Yarn</h3>
1095
-
1096
- <p>@Haojun Zhao maybe?</p>
1097
-
1098
- <h2>References</h2>
1099
-
1100
- <ul>
1101
- <li>Harm's posts:
1102
- <ul>
1103
- <li><a
1104
- href="https://www.harmdevries.com/post/context-length/">https://www.harmdevries.com/post/context-length/</a>
1105
- </li>
1106
- <li><a
1107
- href="https://www.harmdevries.com/post/model-size-vs-compute-overhead/">https://www.harmdevries.com/post/model-size-vs-compute-overhead/</a>
1108
- </li>
1109
- </ul>
1110
- </li>
1111
- <li>Stas' guides:
1112
- <ul>
1113
- <li><a href="https://github.com/stas00/ml-engineering">https://github.com/stas00/ml-engineering</a>
1114
- </li>
1115
- <li><a
1116
- href="https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md">https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md</a>
1117
- </li>
1118
- </ul>
1119
- </li>
1120
- <li>data parallel: <a
1121
- href="https://siboehm.com/articles/22/data-parallel-training">https://siboehm.com/articles/22/data-parallel-training</a>
1122
- </li>
1123
- <li>ZeRO: <a href="https://arxiv.org/abs/1910.02054">https://arxiv.org/abs/1910.02054</a></li>
1124
- <li>TP/SP + Selective Recomputation: <a
1125
- href="https://arxiv.org/abs/2205.05198">https://arxiv.org/abs/2205.05198</a></li>
1126
- </ul>
1127
- <h2>Conclusion and looking forward</h2>
1128
- <p>Through our open science efforts we hope to keep shining a light on the black box that is the training of
1129
- high performance large language models as well as to give every model trainer the ability to create
1130
- state-of-the-art LLMs. We are excited to continue iterating on FineWeb and to release increasingly better
1131
- filtered subsets of web data, in a fully open and reproducible manner.</p>
1132
- <p>In the short term, we are looking forward to applying the learnings from (English) FineWeb to other
1133
- languages. While English currently dominates the LLM landscape, we believe that making high quality web data
1134
- in other languages as accessible as possible would be incredibly impactful.</p>
1135
- <p>In a nutshell: the future is bright and exciting for studying the science of creating datasets at scale and
1136
- in the open 🤗.</p>
1137
  </d-article>
1138
 
1139
  <d-appendix>
@@ -1159,16 +238,12 @@
1159
  <h3 id="citation">Citation</h3>
1160
  <p>For attribution in academic contexts, please cite this work as</p>
1161
  <pre
1162
- class="citation short">Penedo, et al., "The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale", 2024.</pre>
1163
  <p>BibTeX citation</p>
1164
- <pre class="citation long">@misc{penedo2024finewebdatasetsdecantingweb,
1165
- title={The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale},
1166
- author={Guilherme Penedo and Hynek Kydlíček and Loubna Ben allal and Anton Lozhkov and Margaret Mitchell and Colin Raffel and Leandro Von Werra and Thomas Wolf},
1167
- year={2024},
1168
- eprint={2406.17557},
1169
- archivePrefix={arXiv},
1170
- primaryClass={cs.CL}
1171
- url={https://arxiv.org/abs/2406.17557},
1172
  }</pre>
1173
  </d-appendix>
1174
 
 
7
  <meta name="viewport" content="width=device-width, initial-scale=1">
8
  <meta charset="utf8">
9
  <base target="_blank">
10
+ <title>The Ultra-Scale Playbook: Training LLMs on GPU Clusters</title>
11
  <link rel="stylesheet" href="style.css">
12
  </head>
13
 
14
  <body>
15
  <d-front-matter>
16
  <script id='distill-front-matter' type="text/json">{
17
+ "title": "The Ultra-Scale Playbook: Training LLMs on GPU Clusters",
18
+ "description": "This blog covers everything about scaling LLMs in 2025.",
19
  "published": "Sept 28, 2024",
20
  "affiliation": {"name": "HuggingFace"},
21
  "authors": [
 
37
  </script>
38
  </d-front-matter>
39
  <d-title>
40
+ <h1 class="l-page" style="text-align: center;">The Ultra-Scale Playbook: Training LLMs on GPU Clusters</h1>
41
  <div id="title-plot" class="main-plot-container l-screen">
42
  <figure>
43
  <img src="assets/images/banner.png" alt="FineWeb">
 
51
  <d-article>
52
  <d-contents>
53
  </d-contents>
54
+
55
+ <p>Fueled by the scaling laws<d-cite bibtex-key="kaplan2020scalinglaws"></d-cite><d-cite bibtex-key="hoffmann2022chinchilla"></d-cite>, the trend of training ever larger language models on vaster amounts of data has been driving progress in AI for the past couple years. Initially, the development of the largest models happened exclusively behind closed doors of a handful of research labs but recently opened up more with the release of models such as Llama 3.1 405B<d-cite bibtex-key="grattafiori2024llama3herdmodels"></d-cite> and DeepSeek R1<d-cite bibtex-key="deepseekai2024deepseekv3technicalreport"></d-cite>. While these models have <a href="https://huggingface.co/meta-llama">openly shared</a> <a href="https://huggingface.co/deepseek-ai">weights</a> and their training recipes are described in <a href="https://ai.meta.com/research/publications/the-llama-3-herd-of-models/">technical</a> <a href="https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf">reports</a>, the challenging engineering involved in training at the necessary infrastructure scale is still hidden between the lines of a handful of papers and complex training frameworks. This <s>long blog post</s> open-source book is here to open this black box!</p>
56
 
 
 
 
 
57
  <aside>Reading time: 7 days. For the best reading experience, we recommend not using a mobile phone.</aside>
58
 
59
+ <p>In this book we invite you to follow us into the wonderful world of scaling training of Large Language Models to tens, hundreds, even thousands of GPUs. It assumes you know the basics of LLM architecture and training, but are new to distributed training. This writing can be seen as the second part of a trilogy following our first blog on processing data for pre-training, the so-called “<a href="https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1">FineWeb blog post</a>”. Having read both blog posts, you should have almost all the core knowledge needed to deeply understand how LLMs are being built nowadays, missing just a few of the final spices, like data mixing or architecture choices, to complete the recipe (stay tuned…).</p>
60
+
61
+ <p>Pre-training LLMs from scratch now requires amounts of compute which exceed in almost every case the use of a single GPU or machine. The clusters used to train these models range from hundreds to thousands of nodes, each usually equipped with 4 to 8 GPUs. To make the best use of such expensive hardware as well as to train in a reasonable time, a range of distributed training methods have been developed with the goal of ensuring that GPUs are highly utilized at all times. Efficiently scaling LLM training is also not confined to pretraining anymore, as fine-tuning larger models on more domain-specific data is becoming the standard practice to achieve the best results.</p>
 
 
62
 
63
  <aside>We are extremely thankful to the whole <a href="https://distill.pub/">distill.pub</a> team for creating
64
  the template on which we based this blog post.</aside>
65
+
66
+ <p>In this post we’ll cover these scaling methods exhaustively while keeping a single story-line to understand where each technique comes from. We’ll cover data, tensor, pipeline and context parallelism as well as ZeRO and kernel fusion. The post is built on the following <strong>three foundations</strong>:</p>
67
+
68
+ <p><strong>Quick intros on theory and concepts:</strong> before diving into code and experiments, we want to understand how each method works at a high level and what its advantages and limits are. You’ll learn about which parts of a language model eat away your memory and when during training it happens. You’ll learn how we can solve memory constraints by parallelizing the models and increase the throughput by scaling up GPUs. As a result you'll understand how the following widget to compute the memory breakdown of a transformer model works: </p>
69
+
70
  <div id="graph"></div>
71
  <div id="controls">
72
  <div class="cell column-1">
 
169
  </div>
170
  </div>
171
 
172
+ <p>While this widget gives a theoretical breakdown the following tool can be used to predict the memory usage:</p>
173
+
174
+ <p><img alt="image.png" src="assets/images/placeholder.png"/></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
+ <p><strong>Clear code implementations:</strong> theory is one thing, but we discover all kinds of edge cases and important details when we implement something. That’s why we link to implementation references where possible. Depending on the case, we’ll use two code references: the <a href="https://github.com/huggingface/picotron">picotron</a> repository is built for education, thus it implements concepts usually in single, self-contained short files. On the other hand, to look at production-ready code, we’ll refer to the <a href="https://github.com/huggingface/nanotron">nanotron</a> implementation, which is a production training codebase used at Hugging Face.</p>
 
 
 
 
177
 
178
+ <p><img alt="Picotron implements each key concept in a self-contained way, such that the method can be studied separately and in isolation." src="assets/images/placeholder.png" /></p>
 
 
 
 
 
179
 
180
+ <p><strong>Real training efficiency benchmarks:</strong> Finally, how to <em>actually</em> scale your LLM training depends on your infrastructure, such as the kind of chips, interconnect etc., and we can’t give a single unified recipe. What we will give, though, is a way to benchmark several setups, which is what we have done on our cluster! We ran over 4100 distributed experiments with up to 512 GPUs to scan many possible distributed training layouts and model sizes. TODO: link to dataset too </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
 
182
+ <p><img alt="An overview of the over 4000 experiments across all Llama architectures where each data point corresponds to an experiment launch." src="assets/images/placeholder.png" /></p>
 
 
183
 
184
+ <p>As you can see, there’s a lot of ground to be covered. Before getting into the trenches of distributed training, let’s take a quick high-level look at what we’ll cover in this post.</p>
 
 
185
 
186
+ <h2>TL;DR</h2>
187
+ <p>This book is very extensive, so we decided to start with a very general overview of how you can think about distributed training. At a high level, the key challenge in scaling LLM training is to make a training step (forward/backward/optimizer step) with a large batch size as fast as possible.</p>
188
+ <p>When scaling up models and input batches, we quickly end up in situations where either our target batch size won't fit in memory, or/and the model itself is too large to fit in a single GPU's memory.</p>
189
+ <p>To solve this scaling issue we’ll need to carefully evaluate different parallelization strategies and find the optimal balance between three main factors:</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190
  <ol>
191
+ <li><strong>Memory Usage</strong><ul>
192
+ <li>Hard limitation - if a training step doesn't fit in memory, training cannot proceed</li>
193
+ <li>Sometimes we can increase compute (e.g. recomputation) or increase communication (e.g. ZeRO) to reduce memory usage</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  </ul>
195
+ </li>
196
+ <li><strong>Compute Efficiency</strong><ul>
197
+ <li>Memory transfer can also decrease compute efficiency.</li>
198
+ <li>We want our hardware to spend most time computing, so we need to reduce time spent on data transfers or unoptimized kernels.</li>
199
+ <li>GPUs need sufficient workload (large enough matrices/batch sizes) to maintain high utilization (compute-bound) otherwise they become memory-bound (limited by memory bandwidth).</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  </ul>
201
+ </li>
202
+ <li><strong>Communication overhead</strong><ul>
203
+ <li>Two main types. For GPUs: intra-node (NVLink TODO: bandwidth) and inter-node (network TODO: bandwidth)</li>
204
+ <li>Two main attributes: base latency and peak bandwidth. Base latency is a constant overhead that makes us want to do the least number of comms possible, and peak bandwidth controls how fast we can move data between GPUs</li>
205
+ <li>We prioritize using the fastest communication channels (like NVLink) for operations that occur frequently and/or block computation (e.g. tensor parallelism)</li>
206
+ <li>We want to minimize communication overhead as it keeps GPUs idle, so we try to overlap communication with compute as much as possible</li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
  </ul>
208
+ </li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  </ol>
210
+ <p>But let’s not get too far ahead of ourselves and instead scale progressively. To guide you along the journey, and as a practical reference, we summarized the key concepts in a cheatsheet:</p>
211
+ <p>[TODO: ADD CHEATSHEET]</p>
212
+ <p>Now that we nailed down a few key concepts and terms, let’s get started by revisiting the basic training steps of an LLM!</p>
213
+
214
+ <h2>First Steps: Training on one GPU</h2>
215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  </d-article>
217
 
218
  <d-appendix>
 
238
  <h3 id="citation">Citation</h3>
239
  <p>For attribution in academic contexts, please cite this work as</p>
240
  <pre
241
+ class="citation short">XXX, et al., "The Ultra-Scale Playbook: Training LLMs on GPU Clusters", 2025.</pre>
242
  <p>BibTeX citation</p>
243
+ <pre class="citation long">@misc{TODO,
244
+ title={The Ultra-Scale Playbook: Training LLMs on GPU Clusters},
245
+ author={TODO},
246
+ year={2025},
 
 
 
 
247
  }</pre>
248
  </d-appendix>
249