Add Perseus final version (#276)
jaywonchung authored Sep 23, 2024
1 parent 137f2db commit 327a52e
Showing 3 changed files with 26 additions and 0 deletions.
21 changes: 21 additions & 0 deletions source/_data/SymbioticLab.bib
@@ -1682,6 +1682,27 @@ @Article{perseus:arxiv23
}
}
@InProceedings{perseus:sosp24,
author = {Jae-Won Chung and Yile Gu and Insu Jang and Luoxi Meng and Nikhil Bansal and Mosharaf Chowdhury},
booktitle = {SOSP},
title = {Reducing Energy Bloat in Large Model Training},
year = {2024},
month = {Nov},
publist_confkey = {SOSP'24},
publist_link = {paper || perseus-sosp24.pdf},
publist_link = {code || https://github.com/ml-energy/zeus},
publist_link = {website || https://ml.energy/zeus/research_overview/perseus},
publist_topic = {Energy-Efficient Systems},
publist_topic = {Systems + AI},
publist_badge = {Artifacts Available},
publist_badge = {Artifacts Functional},
publist_badge = {Results Reproduced},
publist_abstract = {
Training large AI models on numerous GPUs consumes a massive amount of energy, making power delivery one of the largest limiting factors in building and operating datacenters for AI workloads. However, we observe that not all energy consumed during training directly contributes to end-to-end throughput; a significant portion can be removed without slowing down training. We call this portion energy bloat.
In this work, we identify two independent sources of energy bloat in large model training and propose Perseus, a training system that mitigates both. To do this, Perseus obtains the time–energy tradeoff frontier of a large model training job using an efficient graph cut-based algorithm, and schedules computation energy consumption across time to reduce both types of energy bloat. Evaluation on large models, including GPT-3 and Bloom, shows that Perseus reduces the energy consumption of large model training by up to 30% without any throughput loss or hardware modification.
}}
@Article{llm-survey:arxiv23,
author = {Zhongwei Wan and Xin Wang and Che Liu and Samiul Alam and Yu Zheng and Zhongnan Qu and Shen Yan and Yi Zhu and Quanlu Zhang and Mosharaf Chowdhury and Mi Zhang},
journal = {CoRR},
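The abstract describes Perseus in terms of a time-energy tradeoff frontier, computed with a graph cut-based algorithm and then used to schedule computation energy across time. As a rough, self-contained illustration of what such a frontier is (not the paper's algorithm and not code from the linked Zeus repository), the sketch below brute-forces Pareto-optimal plans from hypothetical per-stage (time, energy) measurements taken at a few GPU frequencies; `stage_options` and `frontier` are made-up names for this example.

```python
from itertools import product

# Hypothetical per-stage measurements: (iteration_time_s, energy_J) at a few GPU frequencies.
stage_options = [
    [(1.0, 300.0), (1.2, 240.0), (1.5, 210.0)],  # pipeline stage 0
    [(0.8, 280.0), (1.1, 230.0), (1.4, 200.0)],  # pipeline stage 1
]

def frontier(stage_options):
    """Brute-force the Pareto-optimal (time, energy) points over all per-stage choices."""
    plans = []
    for combo in product(*stage_options):
        # Synchronous stages: iteration time is set by the slowest stage; energy adds up.
        t = max(opt[0] for opt in combo)
        e = sum(opt[1] for opt in combo)
        plans.append((t, e))
    # Keep a plan only if no other plan is at least as fast and at least as
    # energy-efficient (i.e., it is not dominated).
    return sorted(
        p for p in plans
        if not any(q[0] <= p[0] and q[1] <= p[1] and q != p for q in plans)
    )

if __name__ == "__main__":
    for t, e in frontier(stage_options):
        print(f"iteration time {t:.1f} s -> energy {e:.1f} J")
```

In this toy model, slowing a non-bottleneck stage keeps iteration time unchanged while lowering energy, which is the kind of energy bloat the paper targets; Perseus itself avoids this brute-force enumeration via its graph cut-based formulation.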
Binary file not shown.
5 changes: 5 additions & 0 deletions source/publications/index.md
@@ -36,6 +36,11 @@ venues:
SOSP:
category: Conferences
occurrences:
- key: SOSP'24
name: The 30th ACM Symposium on Operating Systems Principles
date: 2024-11-04
url: https://sigops.org/s/conferences/sosp/2024/
acceptance: 17.34%
- key: SOSP'23
name: The 29th ACM Symposium on Operating Systems Principles
date: 2023-10-26
