Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Abdelgawad, M.; Mozafari, S. H.; Clark, J. J.; Meyer, B. H.; Gross, W. J.
BERTPerf: Inference Latency Predictor for BERT on ARM big.LITTLE Multi-Core Processors Proceedings Article
In: 2022 IEEE Workshop on Signal Processing Systems (SiPS), pp. 1-6, 2022.
@inproceedings{9919203,
title = {{BERTPerf}: Inference Latency Predictor for {BERT} on {ARM} {big.LITTLE} Multi-Core Processors},
author = {M. Abdelgawad and S. H. Mozafari and J. J. Clark and B. H. Meyer and W. J. Gross},
url = {https://ieeexplore.ieee.org/abstract/document/9919203},
doi = {10.1109/SiPS55645.2022.9919203},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 IEEE Workshop on Signal Processing Systems (SiPS)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jin, Xiu; Ba, Wenjing; Wang, Lianglong; Zhang, Tong; Zhang, Xiaodan; Li, Shaowen; Rao, Yuan; Liu, Li
A Novel Tran_NAS Method for the Identification of Fe- and Mg-Deficient Pear Leaves from N- and P-Deficient Pear Leaf Data Journal Article
In: ACS Omega, vol. 7, no. 44, pp. 39727-39741, 2022.
@article{doi:10.1021/acsomega.2c03596,
title = {A Novel {Tran\_NAS} Method for the Identification of {Fe}- and {Mg}-Deficient Pear Leaves from {N}- and {P}-Deficient Pear Leaf Data},
author = {Xiu Jin and Wenjing Ba and Lianglong Wang and Tong Zhang and Xiaodan Zhang and Shaowen Li and Yuan Rao and Li Liu},
url = {https://doi.org/10.1021/acsomega.2c03596},
doi = {10.1021/acsomega.2c03596},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {ACS Omega},
volume = {7},
number = {44},
pages = {39727--39741},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Yanxi; Dong, Minjing; Wang, Yunhe; Xu, Chang
Neural Architecture Search via Proxy Validation Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-16, 2022.
@article{9931480,
title = {Neural Architecture Search via Proxy Validation},
author = {Yanxi Li and Minjing Dong and Yunhe Wang and Chang Xu},
doi = {10.1109/TPAMI.2022.3217648},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
pages = {1--16},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yuan, Liuchun; Huang, Zehao; Wang, Naiyan
PredNAS: A Universal and Sample Efficient Neural Architecture Search Framework Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-14460,
title = {{PredNAS}: A Universal and Sample Efficient Neural Architecture Search Framework},
author = {Liuchun Yuan and Zehao Huang and Naiyan Wang},
url = {https://doi.org/10.48550/arXiv.2210.14460},
doi = {10.48550/arXiv.2210.14460},
eprint = {2210.14460},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Spiridonov, Anton; Akin, Berkin; Xu, Hao; White, Marie Charisse; Zhou, Ping; Gupta, Suyog; Zhou, Yanqi; Long, Yun; Wang, Zhuo
Searching for Efficient Neural Architectures for On-Device ML on Edge TPUs Proceedings Article
In: IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW) (2022) , 2022.
@inproceedings{51797,
title = {Searching for Efficient Neural Architectures for On-Device {ML} on Edge {TPUs}},
author = {Anton Spiridonov and Berkin Akin and Hao Xu and Marie Charisse White and Ping Zhou and Suyog Gupta and Yanqi Zhou and Yun Long and Zhuo Wang},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lourens, Matt; Sinayskiy, Ilya; Park, Daniel K.; Blank, Carsten; Petruccione, Francesco
Architecture representations for quantum convolutional neural networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-15073,
title = {Architecture representations for quantum convolutional neural networks},
author = {Matt Lourens and Ilya Sinayskiy and Daniel K. Park and Carsten Blank and Francesco Petruccione},
url = {https://doi.org/10.48550/arXiv.2210.15073},
doi = {10.48550/arXiv.2210.15073},
eprint = {2210.15073},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yang, Zhi; Li, Zheyang
Efficient Channel Pruning via Architecture-Guided Search Space Shrinking Proceedings Article
In: Yu, Shiqi; Zhang, Zhaoxiang; Yuen, Pong C.; Han, Junwei; Tan, Tieniu; Guo, Yike; Lai, Jianhuang; Zhang, Jianguo (Ed.): Pattern Recognition and Computer Vision, pp. 540–551, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-18907-4.
@inproceedings{10.1007/978-3-031-18907-4_42,
  author    = {Zhi Yang and Zheyang Li},
  title     = {Efficient Channel Pruning via Architecture-Guided Search Space Shrinking},
  editor    = {Shiqi Yu and Zhaoxiang Zhang and Pong C. Yuen and Junwei Han and Tieniu Tan and Yike Guo and Jianhuang Lai and Jianguo Zhang},
  booktitle = {Pattern Recognition and Computer Vision},
  pages     = {540--551},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-031-18907-4},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-18907-4_42},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  abstract  = {Recently, channel pruning methods search for the optimal channel numbers by training a weight-sharing network to evaluate architectures of subnetworks. However, the weight shared between subnetworks incurs severe evaluation bias and an accuracy drop. In this paper, we provide a comprehensive understanding of the search space's impact on the evaluation by dissecting the training process of the weight-sharing network analytically. Specifically, it is proved that the sharing weights induce biased noise on gradients, whose magnitude is proportional to the search range of channel numbers and bias is relative to the average channel numbers of the search space. Motivated by the theoretical result, we design a channel pruning method by training a weight-sharing network with search space shrinking. The search space is iteratively shrunk guided by the optimal architecture searched in the weight-sharing network. The reduced search space boosts the accuracy of the evaluation and significantly cuts down the post-processing computation of finetuning. In the end, we demonstrate the superiority of our channel pruning method over state-of-the-art methods with experiments on ImageNet and COCO.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bublea, Adrian; Caleanu, Catalin-Daniel
AutoML and Neural Architecture Search for Gaze Estimation Proceedings Article
In: 16th IEEE International Symposium on Applied Computational Intelligence and Informatics, SACI 2022, Timisoara, Romania, May 25-28, 2022, pp. 143–148, IEEE, 2022.
@inproceedings{DBLP:conf/saci/BubleaC22,
  author    = {Adrian Bublea and Catalin-Daniel Caleanu},
  title     = {AutoML and Neural Architecture Search for Gaze Estimation},
  booktitle = {16th IEEE International Symposium on Applied Computational Intelligence and Informatics, SACI 2022, Timisoara, Romania, May 25-28, 2022},
  pages     = {143--148},
  publisher = {IEEE},
  url       = {https://doi.org/10.1109/SACI55618.2022.9919471},
  doi       = {10.1109/SACI55618.2022.9919471},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Zhen; Du, Haotong; Yao, Quanming; Li, Xuelong
Search to Pass Messages for Temporal Knowledge Graph Completion Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-16740,
title = {Search to Pass Messages for Temporal Knowledge Graph Completion},
author = {Zhen Wang and Haotong Du and Quanming Yao and Xuelong Li},
url = {https://doi.org/10.48550/arXiv.2210.16740},
doi = {10.48550/arXiv.2210.16740},
eprint = {2210.16740},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Amein, Marihan; Xiong, Zhuoran; Therrien, Olivier; Meyer, Brett H.; Gross, Warren J.
Work-in-Progress: SuperNAS: Fast Multi-Objective SuperNet Architecture Search for Semantic Segmentation Proceedings Article
In: 2022 International Conference on Compilers, Architecture, and Synthesis for Embedded Systems (CASES), pp. 35-36, 2022.
@inproceedings{9933156,
title = {Work-in-Progress: {SuperNAS}: Fast Multi-Objective {SuperNet} Architecture Search for Semantic Segmentation},
author = {Marihan Amein and Zhuoran Xiong and Olivier Therrien and Brett H. Meyer and Warren J. Gross},
url = {https://ieeexplore.ieee.org/abstract/document/9933156},
doi = {10.1109/CASES55004.2022.00024},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 International Conference on Compilers, Architecture, and Synthesis for Embedded Systems (CASES)},
pages = {35--36},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Xuchong; Dai, He; Chen, Jianing; Sun, Hongbin
Efficient Backbone Architecture Search for Stereo Depth Estimation in Autonomous Driving Proceedings Article
In: 2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC), pp. 355-362, 2022.
@inproceedings{9922562,
title = {Efficient Backbone Architecture Search for Stereo Depth Estimation in Autonomous Driving},
author = {Xuchong Zhang and He Dai and Jianing Chen and Hongbin Sun},
url = {https://ieeexplore.ieee.org/abstract/document/9922562},
doi = {10.1109/ITSC55140.2022.9922562},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC)},
pages = {355--362},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Qian, Yaguan; Huang, Shenghui; Wang, Bin; Ling, Xiang; Guan, Xiaohui; Gu, Zhaoquan; Zeng, Shaoning; Zhou, Wujie; Wang, Haijiang
Robust Network Architecture Search via Feature Distortion Restraining Proceedings Article
In: Avidan, Shai; Brostow, Gabriel; Cissé, Moustapha; Farinella, Giovanni Maria; Hassner, Tal (Ed.): Computer Vision -- ECCV 2022, pp. 122–138, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-20065-6.
@inproceedings{10.1007/978-3-031-20065-6_8,
title = {Robust Network Architecture Search via Feature Distortion Restraining},
author = {Yaguan Qian and Shenghui Huang and Bin Wang and Xiang Ling and Xiaohui Guan and Zhaoquan Gu and Shaoning Zeng and Wujie Zhou and Haijiang Wang},
editor = {Shai Avidan and Gabriel Brostow and Moustapha Cissé and Giovanni Maria Farinella and Tal Hassner},
url = {https://link.springer.com/chapter/10.1007/978-3-031-20065-6_8},
isbn = {978-3-031-20065-6},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Computer Vision -- ECCV 2022},
pages = {122--138},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {The vulnerability of Deep Neural Networks, i.e., susceptibility to adversarial attacks, severely limits the application of DNNs in security-sensitive domains. Most of existing methods improve model robustness from weight optimization, such as adversarial training. However, the architecture of DNNs is also a key factor to robustness, which is often neglected or underestimated. We propose Robust Network Architecture Search (RNAS) to obtain a robust network against adversarial attacks. We observe that an adversarial perturbation distorting the non-robust features in latent feature space can further aggravate misclassification. Based on this observation, we search the robust architecture through restricting feature distortion in the search process. Specifically, we define a network vulnerability metric based on feature distortion as a constraint in the search process. This process is modeled as a multi-objective bilevel optimization problem and a novel algorithm is proposed to solve this optimization. Extensive experiments conducted on CIFAR-10/100 and SVHN show that RNAS achieves the best robustness under various adversarial attacks compared with extensive baselines and SOTA methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Schrodi, Simon; Stoll, Danny; Ru, Binxin; Sukthanker, Rhea; Brox, Thomas; Hutter, Frank
Towards Discovering Neural Architectures from Scratch Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-01842,
title = {Towards Discovering Neural Architectures from Scratch},
author = {Simon Schrodi and Danny Stoll and Binxin Ru and Rhea Sukthanker and Thomas Brox and Frank Hutter},
url = {https://doi.org/10.48550/arXiv.2211.01842},
doi = {10.48550/arXiv.2211.01842},
eprint = {2211.01842},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
C., Vishak Prasad; White, Colin; Jain, Paarth; Nayak, Sibasis; Ramakrishnan, Ganesh
Speeding up NAS with Adaptive Subset Selection Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-01454,
title = {Speeding up {NAS} with Adaptive Subset Selection},
author = {Vishak Prasad C. and Colin White and Paarth Jain and Sibasis Nayak and Ganesh Ramakrishnan},
url = {https://doi.org/10.48550/arXiv.2211.01454},
doi = {10.48550/arXiv.2211.01454},
eprint = {2211.01454},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
C., Vishak Prasad; White, Colin; Jain, Paarth; Nayak, Sibasis; Ramakrishnan, Ganesh
Speeding up NAS with Adaptive Subset Selection Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-01454b,
title = {Speeding up {NAS} with Adaptive Subset Selection},
author = {Vishak Prasad C. and Colin White and Paarth Jain and Sibasis Nayak and Ganesh Ramakrishnan},
url = {https://doi.org/10.48550/arXiv.2211.01454},
doi = {10.48550/arXiv.2211.01454},
eprint = {2211.01454},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
internal-note = {NOTE(review): exact duplicate of entry DBLP:journals/corr/abs-2211-01454 (same work, same DOI) -- consider removing one key or aliasing it},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ang, Koon Meng; El-kenawy, El-Sayed M.; Abdelhamid, Abdelaziz A.; Ibrahim, Abdelhameed; Alharbi, Amal H.; Khafaga, Doaa Sami; Tiang, Sew Sun; Lim, Wei Hong
Optimal Design of Convolutional Neural Network Architectures Using Teaching–Learning-Based Optimization for Image Classification Journal Article
In: Symmetry, vol. 14, no. 11, 2022, ISSN: 2073-8994.
@article{sym14112323,
  author    = {Koon Meng Ang and El-Sayed M. El-kenawy and Abdelaziz A. Abdelhamid and Abdelhameed Ibrahim and Amal H. Alharbi and Doaa Sami Khafaga and Sew Sun Tiang and Wei Hong Lim},
  title     = {Optimal Design of Convolutional Neural Network Architectures Using Teaching-Learning-Based Optimization for Image Classification},
  journal   = {Symmetry},
  volume    = {14},
  number    = {11},
  url       = {https://www.mdpi.com/2073-8994/14/11/2323},
  doi       = {10.3390/sym14112323},
  issn      = {2073-8994},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  abstract  = {Convolutional neural networks (CNNs) have exhibited significant performance gains over conventional machine learning techniques in solving various real-life problems in computational intelligence fields, such as image classification. However, most existing CNN architectures were handcrafted from scratch and required significant amounts of problem domain knowledge from designers. A novel deep learning method abbreviated as TLBOCNN is proposed in this paper by leveraging the excellent global search ability of teaching-learning-based optimization (TLBO) to obtain an optimal design of network architecture for a CNN based on the given dataset with symmetrical distribution of each class of data samples. A variable-length encoding scheme is first introduced in TLBOCNN to represent each learner as a potential CNN architecture with different layer parameters. During the teacher phase, a new mainstream architecture computation scheme is designed to compute the mean parameter values of CNN architectures by considering the information encoded into the existing population members with variable lengths. The new mechanisms of determining the differences between two learners with variable lengths and updating their positions are also devised in both the teacher and learner phases to obtain new learners. Extensive simulation studies report that the proposed TLBOCNN achieves symmetrical performance in classifying the majority of MNIST-variant datasets, displays the highest accuracy, and produces CNN models with the lowest complexity levels compared to other state-of-the-art methods due to its promising search ability.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Liu, Dichao; Yamasaki, Toshihiko; Wang, Yu; Mase, Kenji; Kato, Jien
Toward Extremely Lightweight Distracted Driver Recognition With Distillation-Based Neural Architecture Search and Knowledge Transfer Journal Article
In: IEEE Transactions on Intelligent Transportation Systems, pp. 1-14, 2022.
@article{9940550,
title = {Toward Extremely Lightweight Distracted Driver Recognition With Distillation-Based Neural Architecture Search and Knowledge Transfer},
author = {Dichao Liu and Toshihiko Yamasaki and Yu Wang and Kenji Mase and Jien Kato},
url = {https://ieeexplore.ieee.org/document/9940550},
doi = {10.1109/TITS.2022.3217342},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Intelligent Transportation Systems},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Haichao; Li, Jiashi; Xia, Xin; Hao, Kuangrong; Xiao, Xuefeng
Multi-Objective Evolutionary for Object Detection Mobile Architectures Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-02791,
title = {Multi-Objective Evolutionary for Object Detection Mobile Architectures Search},
author = {Haichao Zhang and Jiashi Li and Xin Xia and Kuangrong Hao and Xuefeng Xiao},
url = {https://doi.org/10.48550/arXiv.2211.02791},
doi = {10.48550/arXiv.2211.02791},
eprint = {2211.02791},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Jialin; Chen, Renxiang; Huang, Xianzhen; Qu, Yongzhi
Development of Deep Residual Neural Networks for Gear Pitting Fault Diagnosis Using Bayesian Optimization Journal Article
In: IEEE Transactions on Instrumentation and Measurement, vol. 71, pp. 1-15, 2022.
@article{9938963,
title = {Development of Deep Residual Neural Networks for Gear Pitting Fault Diagnosis Using {Bayesian} Optimization},
author = {Jialin Li and Renxiang Chen and Xianzhen Huang and Yongzhi Qu},
doi = {10.1109/TIM.2022.3219476},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Instrumentation and Measurement},
volume = {71},
pages = {1--15},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Singh, Gagandeep; Alser, Mohammed; Khodamoradi, Alireza; Denolf, Kristof; Firtina, Can; Cavlak, Meryem Banu; Corporaal, Henk; Mutlu, Onur
A Framework for Designing Efficient Deep Learning-Based Genomic Basecallers Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-03079,
title = {A Framework for Designing Efficient Deep Learning-Based Genomic Basecallers},
author = {Gagandeep Singh and Mohammed Alser and Alireza Khodamoradi and Kristof Denolf and Can Firtina and Meryem Banu Cavlak and Henk Corporaal and Onur Mutlu},
url = {https://doi.org/10.48550/arXiv.2211.03079},
doi = {10.48550/arXiv.2211.03079},
eprint = {2211.03079},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Nguyen, Duy-Anh; Tran, Xuan-Tu; Iacopi, Francesca
GAQ-SNN: A Genetic Algorithm based Quantization Framework for Deep Spiking Neural Networks Proceedings Article
In: 2022 International Conference on IC Design and Technology (ICICDT), pp. 93-96, 2022.
@inproceedings{9933070,
title = {{GAQ-SNN}: A Genetic Algorithm based Quantization Framework for Deep Spiking Neural Networks},
author = {Duy-Anh Nguyen and Xuan-Tu Tran and Francesca Iacopi},
url = {https://ieeexplore.ieee.org/abstract/document/9933070},
doi = {10.1109/ICICDT56182.2022.9933070},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 International Conference on IC Design and Technology (ICICDT)},
pages = {93--96},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhai, Leilei; Wang, Dianwei; Fang, Jie; Xu, Zhijie
Auto-DenseNet: DenseNet Optimization Method Based on Evolutionary Algorithm Proceedings Article
In: 2022 5th International Conference on Intelligent Autonomous Systems (ICoIAS), pp. 387-393, 2022.
@inproceedings{9931240,
title = {{Auto-DenseNet}: {DenseNet} Optimization Method Based on Evolutionary Algorithm},
author = {Leilei Zhai and Dianwei Wang and Jie Fang and Zhijie Xu},
url = {https://ieeexplore.ieee.org/abstract/document/9931240},
doi = {10.1109/ICoIAS56028.2022.9931240},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 5th International Conference on Intelligent Autonomous Systems (ICoIAS)},
pages = {387--393},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Sixing; Nguyen, Phuong; Abebe, Waqwoya; Stanley, Justin; Munoz, Pablo; Jannesari, Ali
Resource-Aware Heterogeneous Federated Learning using Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-05716,
title = {Resource-Aware Heterogeneous Federated Learning using Neural Architecture Search},
author = {Sixing Yu and Phuong Nguyen and Waqwoya Abebe and Justin Stanley and Pablo Munoz and Ali Jannesari},
url = {https://doi.org/10.48550/arXiv.2211.05716},
doi = {10.48550/arXiv.2211.05716},
eprint = {2211.05716},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Mo, Hyunho; Iacca, Giovanni
Accelerating Evolutionary Neural Architecture Search for Remaining Useful Life Prediction Proceedings Article
In: Mernik, Marjan; Eftimov, Tome; Črepinšek, Matej (Ed.): Bioinspired Optimization Methods and Their Applications, pp. 15–30, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-21094-5.
@inproceedings{10.1007/978-3-031-21094-5_2,
title = {Accelerating Evolutionary Neural Architecture Search for Remaining Useful Life Prediction},
author = {Hyunho Mo and Giovanni Iacca},
editor = {Marjan Mernik and Tome Eftimov and Matej Črepinšek},
isbn = {978-3-031-21094-5},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Bioinspired Optimization Methods and Their Applications},
pages = {15--30},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Deep neural networks (DNNs) obtained remarkable achievements in remaining useful life (RUL) prediction of industrial components. The architectures of these DNNs are usually determined empirically, usually with the goal of minimizing prediction error without considering the time needed for training. However, such a design process is time-consuming as it is essentially based on trial-and-error. Moreover, this process may be inappropriate in those industrial applications where the DNN model should take into account not only the prediction accuracy but also the training computational cost. To address this challenge, we present a neural architecture search (NAS) technique based on an evolutionary algorithm (EA) that explores the combinatorial parameter space of a one-dimensional convolutional neural network (1-D CNN) to search for the best architectures in terms of a trade-off between RUL prediction error and number of trainable parameters. In particular, a novel way to accelerate the NAS is introduced in this paper. We successfully shorten the lengthy training process by making use of two techniques, namely architecture score without training and extrapolation of learning curves. We test our method on a recent benchmark dataset, the N-CMAPSS, on which we search for trade-off solutions (in terms of prediction error vs. number of trainable parameters) using NAS. The results show that our method considerably reduces the training time (and, as a consequence, the total time of the evolutionary search), yet successfully discovers architectures compromising the two objectives.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rao, Xuan; Zhao, Bo; Yi, Xiaosong; Liu, Derong
CR-LSO: Convex Neural Architecture Optimization in the Latent Space of Graph Variational Autoencoder with Input Convex Neural Networks Technical Report
2022.
@techreport{rao2022cr,
title = {{CR-LSO}: Convex Neural Architecture Optimization in the Latent Space of Graph Variational Autoencoder with Input Convex Neural Networks},
author = {Xuan Rao and Bo Zhao and Xiaosong Yi and Derong Liu},
url = {https://arxiv.org/abs/2211.05950},
eprint = {2211.05950},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Liu, Xiaobo; Gong, Xin; Plaza, Antonio; Cai, Zhihua; Xiao, Xiao; Jiang, Xinwei; Liu, Xiang
MO-CNN: Multiobjective Optimization of Convolutional Neural Networks for Hyperspectral Image Classification Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 60, pp. 1-14, 2022.
@article{9943534,
title = {{MO-CNN}: Multiobjective Optimization of Convolutional Neural Networks for Hyperspectral Image Classification},
author = {Xiaobo Liu and Xin Gong and Antonio Plaza and Zhihua Cai and Xiao Xiao and Xinwei Jiang and Xiang Liu},
url = {https://ieeexplore.ieee.org/abstract/document/9943534},
doi = {10.1109/TGRS.2022.3220748},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {60},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nguyen, Hoang Mark; Derakhshani, Reza
NAS For efficient mobile eyebrow biometrics Journal Article
In: Pattern Recognition Letters, vol. 164, pp. 276-283, 2022, ISSN: 0167-8655.
@article{NGUYEN2022276,
title = {{NAS} For efficient mobile eyebrow biometrics},
author = {Hoang Mark Nguyen and Reza Derakhshani},
url = {https://www.sciencedirect.com/science/article/pii/S0167865522003385},
doi = {10.1016/j.patrec.2022.11.009},
issn = {0167-8655},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Pattern Recognition Letters},
volume = {164},
pages = {276--283},
abstract = {Occlusions, such as those due to wearing surgical masks, pose a significant challenge to the face recognition systems. Among possible remedies, ocular biometric has proven to be a popular choice. However, the upper ocular regions, especially the patterns presented by the eyebrows, have yet to gain the attention they deserve. In this work, we leverage Neural Architecture Search (NAS) to discover better-performing architectures for eyebrow recognition. To reduce the computational complexity, we apply a zero-shot NAS to assess the exploratory architectures’ performance prior to any training. We were able to discover three new architectures that achieved competitive accuracies in eyebrow recognition. In doing so, we explored depthwise separable convolution, hard-swish, and Arcface loss functions to further enhance the discovered models in terms of accuracy and number of parameters. Our best result provided 0.999 AUC, 0.6% EER, and 98.25% GMR at 10−3 FMR over FACES dataset, which is better than the results of state-of-the-art architecture, a 29-layer lightCNN which has 21× more parameters and 8× more FLOPS.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yi, Yun; Zhang, Haokui; Hu, Wenze; Wang, Nannan; Wang, Xiaoyu
NAR-Former: Neural Architecture Representation Learning towards Holistic Attributes Prediction Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-08024,
title = {{NAR-Former}: Neural Architecture Representation Learning towards Holistic Attributes Prediction},
author = {Yun Yi and Haokui Zhang and Wenze Hu and Nannan Wang and Xiaoyu Wang},
url = {https://doi.org/10.48550/arXiv.2211.08024},
doi = {10.48550/arXiv.2211.08024},
eprint = {2211.08024},
eprinttype = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gambella, Matteo; Falcetta, Alessandro; Roveri, Manuel
CNAS: Constrained Neural Architecture Search Proceedings Article
In: 2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC), pp. 2918-2923, 2022.
@inproceedings{9945080,
title = {{CNAS}: Constrained Neural Architecture Search},
author = {Matteo Gambella and Alessandro Falcetta and Manuel Roveri},
url = {https://ieeexplore.ieee.org/abstract/document/9945080},
doi = {10.1109/SMC53654.2022.9945080},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC)},
pages = {2918--2923},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Weil, Victor Alexandre Gomes; Florindo, Joao Batista
Neural Architecture Search Applied to Hybrid Morphological Neural Networks Proceedings Article
In: Xavier-Junior, João Carlos; Rios, Ricardo Araújo (Ed.): Intelligent Systems, pp. 631–645, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-21689-3.
@inproceedings{10.1007/978-3-031-21689-3_44,
title = {Neural Architecture Search Applied to Hybrid Morphological Neural Networks},
author = {Victor Alexandre Gomes Weil and Joao Batista Florindo},
editor = {João Carlos Xavier-Junior and Ricardo Araújo Rios},
url = {https://link.springer.com/chapter/10.1007/978-3-031-21689-3_44},
isbn = {978-3-031-21689-3},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Intelligent Systems},
pages = {631--645},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {This work addresses a way to train morphological neural network differentially using backpropagation. The proposed algorithm can also learn whether to use erosion or dilation, based on the data being processed. Finally, we apply architecture search techniques in order to find the best architecture that may include classical and/or morphological operations. The proposed method coupled with architecture search techniques shows significant improvements on the evaluated data sets.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Luo, Xiangzhong; Liu, Di; Kong, Hao; Huai, Shuo; Chen, Hui; Liu, Weichen
Work-in-Progress: What to Expect of Early Training Statistics? An Investigation on Hardware-Aware Neural Architecture Search Proceedings Article
In: 2022 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS), pp. 1-2, 2022.
@inproceedings{9943182,
title = {Work-in-Progress: What to Expect of Early Training Statistics? An Investigation on Hardware-Aware Neural Architecture Search},
author = {Xiangzhong Luo and Di Liu and Hao Kong and Shuo Huai and Hui Chen and Weichen Liu},
doi = {10.1109/CODES-ISSS55005.2022.00007},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS)},
pages = {1--2},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ye, Lingling; Zhang, Chi; Li, Mingxia; Han, Zhenhua; Tan, Haisheng
Cross-Model Operator Batching for Neural Network Architecture Search Proceedings Article
In: Wang, Lei; Segal, Michael; Chen, Jenhui; Qiu, Tie (Ed.): Wireless Algorithms, Systems, and Applications, pp. 231–242, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19214-2.
@inproceedings{10.1007/978-3-031-19214-2_19,
title = {Cross-Model Operator Batching for Neural Network Architecture Search},
author = {Lingling Ye and Chi Zhang and Mingxia Li and Zhenhua Han and Haisheng Tan},
editor = {Lei Wang and Michael Segal and Jenhui Chen and Tie Qiu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19214-2_19},
isbn = {978-3-031-19214-2},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Wireless Algorithms, Systems, and Applications},
pages = {231--242},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Recently, automated machine learning (AutoML) and neural architecture search (NAS), regarded as promising techniques to design deep learning (DL) models automatically, have received increasing attention from both industry and academia. NAS will generate a large number of candidate models, which typically consist of numerous common substructures, providing a vast opportunity for cross-model optimization (e.g., operator batching) to improve training efficiency. However, most of the existing AutoML frameworks do not make use of operator batching and we also lack an efficient batching strategy. In this work, we propose a heuristic scheme named DPBat to guide the operator batching among multiple models in NAS. For most models, the operator batching of DPBat can be finished in just a few seconds, which is negligible compared to the subsequent training. We adopt Microsoft's open source AutoML framework NNI to implement DPBat to real NAS scenarios. Extensive experiments show that DPBat is highly effective in improving training efficiency and reducing the overhead of operator batching, with a throughput 3.7$\times$ higher than the standard practice of running each job without batching.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Yun; Zhang, Chaobo; Wang, Junli
GNN-EA: Graph Neural Network with Evolutionary Algorithm Proceedings Article
In: 2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC), pp. 1476-1481, 2022.
@inproceedings{9945338,
title = {GNN-EA: Graph Neural Network with Evolutionary Algorithm},
author = {Yun Huang and Chaobo Zhang and Junli Wang},
url = {https://ieeexplore.ieee.org/abstract/document/9945338},
doi = {10.1109/SMC53654.2022.9945338},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC)},
pages = {1476--1481},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Firouzian, Negin; Mozafari, Seyyed Hasan; Clark, James J.; Gross, Warren J.; Meyer, Brett H.
Work-in-Progress: Utilizing latency and accuracy predictors for efficient hardware-aware NAS Proceedings Article
In: 2022 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS), pp. 15-16, 2022.
@inproceedings{9943117,
title = {Work-in-Progress: Utilizing latency and accuracy predictors for efficient hardware-aware NAS},
author = {Negin Firouzian and Seyyed Hasan Mozafari and James J. Clark and Warren J. Gross and Brett H. Meyer},
url = {https://ieeexplore.ieee.org/abstract/document/9943117},
doi = {10.1109/CODES-ISSS55005.2022.00014},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS)},
pages = {15--16},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Taojiannan; Yang, Linjie; Jin, Xiaojie; Chen, Chen
Revisiting Training-free NAS Metrics: An Efficient Training-based Method Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-08666,
title = {Revisiting Training-free NAS Metrics: An Efficient Training-based Method},
author = {Taojiannan Yang and Linjie Yang and Xiaojie Jin and Chen Chen},
url = {https://doi.org/10.48550/arXiv.2211.08666},
doi = {10.48550/arXiv.2211.08666},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.08666},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.08666},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wei, Lanning; He, Zhiqiang; Zhao, Huan; Yao, Quanming
Enhancing Intra-class Information Extraction for Heterophilous Graphs: One Neural Architecture Search Approach Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-10990,
title = {Enhancing Intra-class Information Extraction for Heterophilous Graphs: One Neural Architecture Search Approach},
author = {Lanning Wei and Zhiqiang He and Huan Zhao and Quanming Yao},
url = {https://doi.org/10.48550/arXiv.2211.10990},
doi = {10.48550/arXiv.2211.10990},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.10990},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.10990},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Shahawy, Mohamed; Benkhelifa, Elhadj
HiveNAS: Neural Architecture Search using Artificial Bee Colony Optimization Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-10250,
title = {HiveNAS: Neural Architecture Search using Artificial Bee Colony Optimization},
author = {Mohamed Shahawy and Elhadj Benkhelifa},
url = {https://doi.org/10.48550/arXiv.2211.10250},
doi = {10.48550/arXiv.2211.10250},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.10250},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.10250},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Lanfei; Xie, Lingxi; Bi, Kaifeng; Zhao, Kaili; Guo, Jun; Tian, Qi
M2NAS: Joint Neural Architecture Optimization System with Network Transmission Journal Article
In: IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, pp. 1-1, 2022.
@article{9956879,
title = {M2NAS: Joint Neural Architecture Optimization System with Network Transmission},
author = {Lanfei Wang and Lingxi Xie and Kaifeng Bi and Kaili Zhao and Jun Guo and Qi Tian},
url = {https://ieeexplore.ieee.org/abstract/document/9956879},
doi = {10.1109/TCAD.2022.3223852},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Guo, Bicheng; Guo, Shuxuan; Shi, Miaojing; Cheng, Peng; He, Shibo; Chen, Jiming; Yu, Kaicheng
(alpha) DARTS Once More: Enhancing Differentiable Architecture Search by Masked Image Modeling Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-10105,
title = {(alpha) DARTS Once More: Enhancing Differentiable Architecture Search by Masked Image Modeling},
author = {Bicheng Guo and Shuxuan Guo and Miaojing Shi and Peng Cheng and Shibo He and Jiming Chen and Kaicheng Yu},
url = {https://doi.org/10.48550/arXiv.2211.10105},
doi = {10.48550/arXiv.2211.10105},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.10105},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.10105},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
He, Xin; Yao, Jiangchao; Wang, Yuxin; Tang, Zhenheng; Cheung, Ka Chun; See, Simon; Han, Bo; Chu, Xiaowen
NAS-LID: Efficient Neural Architecture Search with Local Intrinsic Dimension Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-12759,
title = {NAS-LID: Efficient Neural Architecture Search with Local Intrinsic Dimension},
author = {Xin He and Jiangchao Yao and Yuxin Wang and Zhenheng Tang and Ka Chun Cheung and Simon See and Bo Han and Xiaowen Chu},
url = {https://doi.org/10.48550/arXiv.2211.12759},
doi = {10.48550/arXiv.2211.12759},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.12759},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.12759},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hakim, Tal
Accuracy Prediction for NAS Acceleration using Feature Selection and Extrapolation Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-12419,
title = {Accuracy Prediction for NAS Acceleration using Feature Selection and Extrapolation},
author = {Tal Hakim},
url = {https://doi.org/10.48550/arXiv.2211.12419},
doi = {10.48550/arXiv.2211.12419},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.12419},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.12419},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Milardo, Sebastiano; Rathore, Punit; Santi, Paolo; Ratti, Carlo
A Data-Driven Framework for Driving Style Classification Proceedings Article
In: Chen, Weitong; Yao, Lina; Cai, Taotao; Pan, Shirui; Shen, Tao; Li, Xue (Ed.): Advanced Data Mining and Applications, pp. 253–265, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-22137-8.
@inproceedings{10.1007/978-3-031-22137-8_19,
title = {A Data-Driven Framework for Driving Style Classification},
author = {Sebastiano Milardo and Punit Rathore and Paolo Santi and Carlo Ratti},
editor = {Weitong Chen and Lina Yao and Taotao Cai and Shirui Pan and Tao Shen and Xue Li},
url = {https://link.springer.com/chapter/10.1007/978-3-031-22137-8_19},
isbn = {978-3-031-22137-8},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Advanced Data Mining and Applications},
pages = {253--265},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Traditional driving behaviour recognition algorithms leverage hand-crafted features extracted from raw driving data and then apply user-defined machine learning models to identify driving behaviours. However, such solutions are limited by the set of selected features and by the chosen model. In this work, we present a data-driven driving behaviour recognition framework that utilizes an unsupervised feature extraction and feature selection algorithm and a deep neural network architecture obtained using an Automated Machine Learning (AutoML) approach. To validate the feasibility of this solution, numerical evaluations were performed on a unique real-world driving datasets collected from 29 professional truck drivers in uncontrolled environments, including supervisor's scoring of driver behavior that is used as ground truth data. Our experimental results show that the proposed deep neural network model achieves up to 95\% accuracy for multi-class classification, significantly outperforming five other popular machine learning models.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shen, Jinbo; Luo, Mengting; Liu, Han; Liao, Peixi; Chen, Hu; Zhang, Yi
MLF-IOSC: Multi-Level Fusion Network with Independent Operation Search Cell for Low-Dose CT Denoising Journal Article
In: IEEE Transactions on Medical Imaging, pp. 1-1, 2022.
@article{9963565,
title = {MLF-IOSC: Multi-Level Fusion Network with Independent Operation Search Cell for Low-Dose CT Denoising},
author = {Jinbo Shen and Mengting Luo and Han Liu and Peixi Liao and Hu Chen and Yi Zhang},
url = {https://ieeexplore.ieee.org/abstract/document/9963565},
doi = {10.1109/TMI.2022.3224396},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Medical Imaging},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Oscal Tzyh-Chiang; Chang, Yu-Xuan; Jhao, Yu-Wei; Chung, Chih-Yu; Chang, Yun-Ling; Huang, Wei-Hsiang
3D Object Detection of Cars and Pedestrians by Deep Neural Networks from Unit-Sharing One-Shot NAS Proceedings Article
In: 2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS), pp. 1-8, 2022.
@inproceedings{9959427,
title = {3D Object Detection of Cars and Pedestrians by Deep Neural Networks from Unit-Sharing One-Shot NAS},
author = {Oscal Tzyh-Chiang Chen and Yu-Xuan Chang and Yu-Wei Jhao and Chih-Yu Chung and Yun-Ling Chang and Wei-Hsiang Huang},
url = {https://ieeexplore.ieee.org/abstract/document/9959427},
doi = {10.1109/AVSS56176.2022.9959427},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)},
pages = {1--8},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Prabhakar, Swaroop N.; Deshwal, Ankur; Mishra, Rahul; Kim, Hyeonsu
DistilNAS: Neural Architecture Search With Distilled Data Journal Article
In: IEEE Access, vol. 10, pp. 124990-124998, 2022.
@article{9963961,
title = {DistilNAS: Neural Architecture Search With Distilled Data},
author = {Swaroop N. Prabhakar and Ankur Deshwal and Rahul Mishra and Hyeonsu Kim},
url = {https://ieeexplore.ieee.org/document/9963961},
doi = {10.1109/ACCESS.2022.3224788},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
volume = {10},
pages = {124990--124998},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurlahosur, Sunil V.; Meena, S. M.; Kulkarni, Uday; Dcosta, Winston; Lokur, Vineet; Sirigeri, Rohan V.; Porwal, Sajal; Sammed, S. P.; Mudenagudi, Uma
Comparative Analysis of Neural Architecture Search Methods for Classification of Cultural Heritage Sites Proceedings Article
In: Mudenagudi, Uma; Nigam, Aditya; Sarvadevabhatla, Ravi Kiran; Choudhary, Ayesha (Ed.): Proceedings of the Satellite Workshops of ICVGIP 2021, pp. 25–41, Springer Nature Singapore, Singapore, 2022, ISBN: 978-981-19-4136-8.
@inproceedings{10.1007/978-981-19-4136-8_2,
title = {Comparative Analysis of Neural Architecture Search Methods for Classification of Cultural Heritage Sites},
author = {Sunil V. Gurlahosur and S. M. Meena and Uday Kulkarni and Winston Dcosta and Vineet Lokur and Rohan V. Sirigeri and Sajal Porwal and S. P. Sammed and Uma Mudenagudi},
editor = {Uma Mudenagudi and Aditya Nigam and Ravi Kiran Sarvadevabhatla and Ayesha Choudhary},
url = {https://link.springer.com/chapter/10.1007/978-981-19-4136-8_2},
isbn = {978-981-19-4136-8},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the Satellite Workshops of ICVGIP 2021},
pages = {25--41},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {In the current era of Machine Learning, the performance of Neural Networks in object detection, image classification, and video analytics has improved with better design of architecture. It often requires engineers to spend substantial time and effort to design the network, which can be an error-prone method. This method does not exhaustively search the entire search space of possible neural network architecture and guarantees optimal accuracy from the designed model. Neural Architecture Search (NAS) automates this process to find the optimal network to outperform the hand-designed model. Though NAS methods have shown promising performance for image classification tasks, it is challenging to infer why they work well on standard data sets and perform poorly when transferring the same NAS method to custom/real-time data sets. This paper proposes a custom image data set based on Indian Heritage sites built using a crowdsourced framework to perform a comparative performance analysis of NAS methods for the image classification task. The data set consists of 20,000 color images of 1920*1080 pixels from 40 heritage sites, with 16,000 training and 4000 test images. The comparative study is performed on three primary NAS methods viz. Efficient Neural Architecture Search via parameter sharing (ENAS), Differentiable Architecture Search (DARTS), and Neural Architecture Search using Multi-Objective Genetic Algorithm (NSGA-Net). The DARTS showed 88.625\% accuracy, ENAS showed 32.83\% accuracy and NSGA-Net produced 69.92\% accuracy on the custom data set.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Muchen; Liu, Jeffrey Yunfan; Sigal, Leonid; Liao, Renjie
GraphPNAS: Learning Distribution of Good Neural Architectures via Deep Graph Generative Models Technical Report
2022.
@techreport{https://doi.org/10.48550/arxiv.2211.15155,
title = {GraphPNAS: Learning Distribution of Good Neural Architectures via Deep Graph Generative Models},
author = {Muchen Li and Jeffrey Yunfan Liu and Leonid Sigal and Renjie Liao},
url = {https://arxiv.org/abs/2211.15155},
doi = {10.48550/arXiv.2211.15155},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.15155},
eprinttype = {arXiv},
publisher = {arXiv},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zeng, Wenxuan; Li, Meng; Xiong, Wenjie; Lu, Wenjie; Tan, Jin; Wang, Runsheng; Huang, Ru
MPCViT: Searching for MPC-friendly Vision Transformer with Heterogeneous Attention Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-13955,
title = {MPCViT: Searching for MPC-friendly Vision Transformer with Heterogeneous Attention},
author = {Wenxuan Zeng and Meng Li and Wenjie Xiong and Wenjie Lu and Jin Tan and Runsheng Wang and Ru Huang},
url = {https://doi.org/10.48550/arXiv.2211.13955},
doi = {10.48550/arXiv.2211.13955},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.13955},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.13955},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xu, Xianzhe; Jiang, Yiqi; Chen, Weihua; Huang, Yilun; Zhang, Yuan; Sun, Xiuyu
DAMO-YOLO : A Report on Real-Time Object Detection Design Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-15444,
title = {DAMO-YOLO : A Report on Real-Time Object Detection Design},
author = {Xianzhe Xu and Yiqi Jiang and Weihua Chen and Yilun Huang and Yuan Zhang and Xiuyu Sun},
url = {https://doi.org/10.48550/arXiv.2211.15444},
doi = {10.48550/arXiv.2211.15444},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.15444},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.15444},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Shuai, Zhenhao; Liu, Hongbo; Wan, Zhaolin; Yu, Wei-Jie; Zhang, Jun
A Self-adaptive Neuroevolution Approach to Constructing Deep Neural Network Architectures Across Different Types Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2211-14753,
title = {A Self-adaptive Neuroevolution Approach to Constructing Deep Neural Network Architectures Across Different Types},
author = {Zhenhao Shuai and Hongbo Liu and Zhaolin Wan and Wei-Jie Yu and Jun Zhang},
url = {https://doi.org/10.48550/arXiv.2211.14753},
doi = {10.48550/arXiv.2211.14753},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
eprint = {2211.14753},
eprinttype = {arXiv},
journal = {CoRR},
volume = {abs/2211.14753},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}