Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If a paper is missing from the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that in other areas of machine learning, AI, and optimization. We would therefore like to share some best practices for the empirical evaluation of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or jump directly to our checklist.
Transformers have gained increasing popularity across domains. For a comprehensive list of papers focusing on neural architecture search for transformer-based search spaces, the awesome-transformer-search repo is all you need.
0000
Cho, Minsu
Deep Learning Model Design Algorithms for High-Performing Plaintext and Ciphertext Inference PhD Thesis
0000.
@phdthesis{ChoPHD23,
title = {Deep Learning Model Design Algorithms for High-Performing Plaintext and Ciphertext Inference},
author = {Minsu Cho},
url = {https://www.proquest.com/docview/2767241424?pq-origsite=gscholar&fromopenview=true},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Zhou, Dongzhan
Designing Deep Model and Training Paradigm for Object Perception PhD Thesis
0000.
@phdthesis{ZhouPhD2023,
title = {Designing Deep Model and Training Paradigm for Object Perception},
author = {Zhou, Dongzhan},
url = {https://ses.library.usyd.edu.au/handle/2123/31055},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Shariatzadeh, Seyed Mahdi; Fathy, Mahmood; Berangi, Reza
Improving the accuracy and speed of fast template-matching algorithms by neural architecture search Journal Article
In: Expert Systems, vol. n/a, no. n/a, pp. e13358, 0000.
@article{https://doi.org/10.1111/exsy.13358,
title = {Improving the accuracy and speed of fast template-matching algorithms by neural architecture search},
author = {Seyed Mahdi Shariatzadeh and Mahmood Fathy and Reza Berangi},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/exsy.13358},
doi = {10.1111/exsy.13358},
journal = {Expert Systems},
volume = {n/a},
number = {n/a},
pages = {e13358},
abstract = {Neural architecture search can be used to find convolutional neural architectures that are precise and robust while enjoying enough speed for industrial image processing applications. In this paper, our goal is to achieve optimal convolutional neural networks (CNNs) for multiple-template matching for applications such as licence plate detection (LPD). We perform an iterative local neural architecture search for the models with minimum validation error as well as low computational cost from our search space of about 32 billion models. We describe the findings of the experiments and discuss the specifications of the final optimal architectures. About 20-times error reduction and 6-times computational complexity reduction are achieved over our engineered neural architecture after about 500 neural architecture evaluations (in about 10 h). The typical speed of our final model is comparable to classic template-matching algorithms while performing more robust, multiple-template matching at different scales.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Yongjia; Zhan, Jinyu; Jiang, Wei; Jiang, Yucheng; Yu, Antai
Neural architecture search for resource constrained hardware devices: A survey Journal Article
In: IET Cyber-Physical Systems: Theory & Applications, vol. n/a, no. n/a, 0000.
@article{https://doi.org/10.1049/cps2.12058,
title = {Neural architecture search for resource constrained hardware devices: A survey},
author = {Yongjia Yang and Jinyu Zhan and Wei Jiang and Yucheng Jiang and Antai Yu},
url = {https://ietresearch.onlinelibrary.wiley.com/doi/abs/10.1049/cps2.12058},
doi = {10.1049/cps2.12058},
journal = {IET Cyber-Physical Systems: Theory & Applications},
volume = {n/a},
number = {n/a},
abstract = {With the emergence of powerful and low-energy Internet of Things devices, deep learning computing is increasingly applied to resource-constrained edge devices. However, the mismatch between hardware devices with low computing capacity and the increasing complexity of Deep Neural Network models, as well as growing real-time requirements, brings challenges to the design and deployment of deep learning models. For example, autonomous driving technologies rely on real-time object detection of the environment, which cannot tolerate the extra latency of sending data to the cloud, processing it and then sending the results back to edge devices. Many studies aim to find innovative ways to reduce the size of deep learning models, the number of floating-point operations, and the time overhead of inference. Neural Architecture Search (NAS) makes it possible to automatically generate efficient neural network models. The authors summarise the existing NAS methods for resource-constrained devices and categorise them according to single-objective or multi-objective optimisation. They review the search space, the search algorithm and the constraints of NAS on hardware devices, and explore the challenges and open problems of hardware NAS.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yan, Longhao; Wu, Qingyu; Li, Xi; Xie, Chenchen; Zhou, Xilin; Li, Yuqi; Shi, Daijing; Yu, Lianfeng; Zhang, Teng; Tao, Yaoyu; Yan, Bonan; Zhong, Min; Song, Zhitang; Yang, Yuchao; Huang, Ru
Neural Architecture Search with In-Memory Multiply–Accumulate and In-Memory Rank Based on Coating Layer Optimized C-Doped Ge2Sb2Te5 Phase Change Memory Journal Article
In: Advanced Functional Materials, vol. n/a, no. n/a, pp. 2300458, 0000.
@article{https://doi.org/10.1002/adfm.202300458,
title = {Neural Architecture Search with In-Memory Multiply–Accumulate and In-Memory Rank Based on Coating Layer Optimized C-Doped Ge2Sb2Te5 Phase Change Memory},
author = {Longhao Yan and Qingyu Wu and Xi Li and Chenchen Xie and Xilin Zhou and Yuqi Li and Daijing Shi and Lianfeng Yu and Teng Zhang and Yaoyu Tao and Bonan Yan and Min Zhong and Zhitang Song and Yuchao Yang and Ru Huang},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/adfm.202300458},
doi = {10.1002/adfm.202300458},
journal = {Advanced Functional Materials},
volume = {n/a},
number = {n/a},
pages = {2300458},
abstract = {Neural architecture search (NAS), as a subfield of automated machine learning, can design neural network models with better performance than manual design. However, the energy and time consumption of conventional software-based NAS is huge, hindering its development and applications. Herein, 4 Mb phase change memory (PCM) chips are first fabricated that enable two key in-memory computing operations, in-memory multiply-accumulate (MAC) and in-memory rank, for efficient NAS. The impact of the coating layer material of the blade-type heating electrode on the device uniformity, and in turn on NAS performance, is systematically analyzed. The random weights in the searched network architecture can be fine-tuned in the last stage. With 512 × 512 arrays based on a 40 nm CMOS process, the PCM-based NAS achieves a 25–53× smaller model size and better performance than manually designed networks, and improves the energy and time efficiency by 4779× and 123×, respectively, compared with NAS running on a graphics processing unit (GPU). This work can expand the hardware-accelerated in-memory operators and significantly extend the applications of in-memory computing enabled by nonvolatile memory to advanced machine learning tasks.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}