docs: add section on CPU+GPU abstractions with references

Andrew 2025-12-09 02:24:47 +07:00
parent fdd02841c2
commit 75ed30795e
2 changed files with 57 additions and 0 deletions


@@ -411,3 +411,46 @@
author = {Reinders, James and Ashbaugh, Ben and Brodman, James and Kinsner, Michael and Pennycook, John and Tian, Xinmin},
year = {2021},
}
@article{mittal_survey_2015,
title = {A {Survey} of {CPU}-{GPU} {Heterogeneous} {Computing} {Techniques}},
volume = {47},
issn = {0360-0300, 1557-7341},
url = {https://dl.acm.org/doi/10.1145/2788396},
doi = {10.1145/2788396},
abstract = {As both CPUs and GPUs become employed in a wide range of applications, it has been acknowledged that both of these Processing Units (PUs) have their unique features and strengths and hence, CPU-GPU collaboration is inevitable to achieve high-performance computing. This has motivated a significant amount of research on heterogeneous computing techniques, along with the design of CPU-GPU fused chips and petascale heterogeneous supercomputers. In this article, we survey Heterogeneous Computing Techniques (HCTs) such as workload partitioning that enable utilizing both CPUs and GPUs to improve performance and/or energy efficiency. We review heterogeneous computing approaches at runtime, algorithm, programming, compiler, and application levels. Further, we review both discrete and fused CPU-GPU systems and discuss benchmark suites designed for evaluating Heterogeneous Computing Systems (HCSs). We believe that this article will provide insights into the workings and scope of applications of HCTs to researchers and motivate them to further harness the computational powers of CPUs and GPUs to achieve the goal of exascale performance.},
language = {en},
number = {4},
urldate = {2025-12-08},
journal = {ACM Computing Surveys},
author = {Mittal, Sparsh and Vetter, Jeffrey S.},
month = jul,
year = {2015},
pages = {1--35},
}
@book{kirk_programming_2017,
	address = {Amsterdam; Boston; Heidelberg},
	edition = {Third},
title = {Programming massively parallel processors: a hands-on approach},
isbn = {9780128119860},
shorttitle = {Programming massively parallel processors},
	language = {en},
publisher = {Elsevier, Morgan Kaufmann},
author = {Kirk, David and Hwu, Wen-mei W.},
year = {2017},
}
@misc{jia_beyond_2018,
title = {Beyond {Data} and {Model} {Parallelism} for {Deep} {Neural} {Networks}},
url = {http://arxiv.org/abs/1807.05358},
doi = {10.48550/arXiv.1807.05358},
abstract = {The computational requirements for training deep neural networks (DNNs) have grown to the point that it is now standard practice to parallelize training. Existing deep learning systems commonly use data or model parallelism, but unfortunately, these strategies often result in suboptimal parallelization performance. In this paper, we define a more comprehensive search space of parallelization strategies for DNNs called SOAP, which includes strategies to parallelize a DNN in the Sample, Operation, Attribute, and Parameter dimensions. We also propose FlexFlow, a deep learning framework that uses guided randomized search of the SOAP space to find a fast parallelization strategy for a specific parallel machine. To accelerate this search, FlexFlow introduces a novel execution simulator that can accurately predict a parallelization strategy's performance and is three orders of magnitude faster than prior approaches that have to execute each strategy. We evaluate FlexFlow with six real-world DNN benchmarks on two GPU clusters and show that FlexFlow can increase training throughput by up to 3.8x over state-of-the-art approaches, even when including its search time, and also improves scalability.},
urldate = {2025-12-08},
publisher = {arXiv},
author = {Jia, Zhihao and Zaharia, Matei and Aiken, Alex},
month = jul,
year = {2018},
note = {arXiv:1807.05358},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
}