Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field are still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2021
Li, Shiqian; Li, Wei; Wen, Shiping; Shi, Kaibo; Yang, Yin; Zhou, Pan; Huang, Tingwen
Auto-FERNet: A Facial Expression Recognition Network with Architecture Search Journal Article
In: IEEE Transactions on Network Science and Engineering, pp. 1-1, 2021.
@article{9442348,
title = {{Auto-FERNet}: A Facial Expression Recognition Network with Architecture Search},
author = {Shiqian Li and Wei Li and Shiping Wen and Kaibo Shi and Yin Yang and Pan Zhou and Tingwen Huang},
doi = {10.1109/TNSE.2021.3083739},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Network Science and Engineering},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Philipp, George
The Nonlinearity Coefficient - A Practical Guide to Neural Architecture Design Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2105-12210,
title = {The Nonlinearity Coefficient - A Practical Guide to Neural Architecture Design},
author = {George Philipp},
url = {https://arxiv.org/abs/2105.12210},
eprint = {2105.12210},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yao, Lewei; Pi, Renjie; Xu, Hang; Zhang, Wei; Li, Zhenguo; Zhang, Tong
Joint-DetNAS: Upgrade Your Detector with NAS, Pruning and Dynamic Distillation Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2105-12971,
title = {{Joint-DetNAS}: Upgrade Your Detector with {NAS}, Pruning and Dynamic Distillation},
author = {Lewei Yao and Renjie Pi and Hang Xu and Wei Zhang and Zhenguo Li and Tong Zhang},
url = {https://arxiv.org/abs/2105.12971},
eprint = {2105.12971},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhou, Qinqin; Zhong, Bineng; Liu, Xin; Ji, Rongrong
Attention-Based Neural Architecture Search for Person Re-Identification Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-13, 2021.
@article{9444559,
title = {Attention-Based Neural Architecture Search for Person Re-Identification},
author = {Qinqin Zhou and Bineng Zhong and Xin Liu and Rongrong Ji},
url = {https://ieeexplore.ieee.org/abstract/document/9444559},
doi = {10.1109/TNNLS.2021.3082701},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
pages = {1--13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xu, Jin; Tan, Xu; Luo, Renqian; Song, Kaitao; Li, Jian; Qin, Tao; Liu, Tie-Yan
NAS-BERT: Task-Agnostic and Adaptive-Size BERT Compression with Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2105-14444,
title = {{NAS-BERT}: Task-Agnostic and Adaptive-Size {BERT} Compression with Neural Architecture Search},
author = {Jin Xu and Xu Tan and Renqian Luo and Kaitao Song and Jian Li and Tao Qin and Tie-Yan Liu},
url = {https://arxiv.org/abs/2105.14444},
eprint = {2105.14444},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lin, Haojia; Wu, Shangbin; Chen, Yiping; Li, Wen; Luo, Zhipeng; Guo, Yulan; Wang, Cheng; Li, Jonathan
Semantic segmentation of 3D indoor LiDAR point clouds through feature pyramid architecture search Journal Article
In: ISPRS Journal of Photogrammetry and Remote Sensing, vol. 177, pp. 279-290, 2021, ISSN: 0924-2716.
@article{LIN2021279,
title = {Semantic segmentation of {3D} indoor {LiDAR} point clouds through feature pyramid architecture search},
author = {Haojia Lin and Shangbin Wu and Yiping Chen and Wen Li and Zhipeng Luo and Yulan Guo and Cheng Wang and Jonathan Li},
url = {https://www.sciencedirect.com/science/article/pii/S0924271621001349},
doi = {10.1016/j.isprsjprs.2021.05.009},
issn = {0924-2716},
year = {2021},
date = {2021-01-01},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
volume = {177},
pages = {279--290},
abstract = {Semantic segmentation of 3D Light Detection and Ranging (LiDAR) indoor point clouds using deep learning has been an active topic in recent years. However, most deep neural networks on point clouds conduct multi-level feature fusion via a simple U-shape architecture, which lacks enough capacity on both classification and localization in the segmentation task. In this paper, we propose a Neural Architecture Search (NAS) method to search a Feature Pyramid Network (FPN) module for 3D indoor point cloud semantic segmentation. Specifically, we aim to automatically find an effective feature pyramid architecture as a feature fusion neck in a designed novel pyramidal search space covering all information communication paths for multi-level features. The searched FPN module, named SFPN, contains the most important connections among all the potential paths to fuse representations at different levels. Our proposed SFPN is generic and effective as well as capable to be added to existing segmentation networks to augment the segmentation performance. Extensive experiments on ScanNet and S3DIS show that consistent and remarkable gains of segmentation performance can be achieved by different classical networks combined with SFPN. Specially, PointNet++-SFPN achieves mIoU gains of 7.8% on ScanNet v2 and 4.7% on S3DIS, and PointConv-SFPN achieves 4.5% and 3.7% improvement respectively on the above datasets.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhao, Yuekai; Dong, Li; Shen, Yelong; Zhang, Zhihua; Wei, Furu; Chen, Weizhu
Memory-Efficient Differentiable Transformer Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2105-14669,
title = {Memory-Efficient Differentiable {Transformer} Architecture Search},
author = {Yuekai Zhao and Li Dong and Yelong Shen and Zhihua Zhang and Furu Wei and Weizhu Chen},
url = {https://arxiv.org/abs/2105.14669},
eprint = {2105.14669},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Nader, Andrew; Azar, Danielle
Evolution of Activation Functions: An Empirical Investigation Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2105-14614,
title = {Evolution of Activation Functions: An Empirical Investigation},
author = {Andrew Nader and Danielle Azar},
url = {https://arxiv.org/abs/2105.14614},
eprint = {2105.14614},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Pan, Zijie; Zeng, Jiajin; Cheng, Riqiang; Yan, Hongyang; Li, Jin
PNAS: A privacy preserving framework for neural architecture search services Journal Article
In: Information Sciences, vol. 573, pp. 370-381, 2021, ISSN: 0020-0255.
@article{PAN2021370,
title = {{PNAS}: A privacy preserving framework for neural architecture search services},
author = {Zijie Pan and Jiajin Zeng and Riqiang Cheng and Hongyang Yan and Jin Li},
url = {https://www.sciencedirect.com/science/article/pii/S002002552100565X},
doi = {10.1016/j.ins.2021.05.073},
issn = {0020-0255},
year = {2021},
date = {2021-01-01},
journal = {Information Sciences},
volume = {573},
pages = {370--381},
abstract = {The success of deep neural networks has contributed to many fields, such as finance, medic and speech recognition. Machine learning models adopted in these fields are always trained with a massive amount of distributed and highly personalized data harvested directly from users. Concerns for data privacy and the demand for better data exploitation have prompted the design of several secure schemes that allow an untrusted server to train ML models for one or multiple parties. However, these existing schemes only focus on network parameter, and hardly extend their optimization range to model architecture scope. Sine the performance of a neural network is closely related to both parameter and its architecture, service providers are difficult to deliver customized and flexible neural networks to each client. To this end, in this paper we propose PNAS, a novel MLaaS framework that enables a server to jointly optimize network parameter and architecture while ensuring the privacy of training sets. A double-encryption scheme is derived to prevent privacy leakage from sample itself, as well as intermediate feature maps during training. Specifically, we adopt functional encryption and feature transformation to secure forward and back propagation. Extensive experiments have demonstrated the superiority of our proposal.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bungert, Leon; Roith, Tim; Tenbrinck, Daniel; Burger, Martin
Neural Architecture Search via Bregman Iterations Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-02479,
title = {Neural Architecture Search via {Bregman} Iterations},
author = {Leon Bungert and Tim Roith and Daniel Tenbrinck and Martin Burger},
url = {https://arxiv.org/abs/2106.02479},
eprint = {2106.02479},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Mo, Tong; Liu, Bang
Encoder-Decoder Neural Architecture Optimization for Keyword Spotting Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-02738,
title = {Encoder-Decoder Neural Architecture Optimization for Keyword Spotting},
author = {Tong Mo and Bang Liu},
url = {https://arxiv.org/abs/2106.02738},
eprint = {2106.02738},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Miao, Yingjie; Song, Xingyou; Peng, Daiyi; Yue, Summer; Brevdo, Eugene; Faust, Aleksandra
RL-DARTS: Differentiable Architecture Search for Reinforcement Learning Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-02229,
title = {{RL-DARTS}: Differentiable Architecture Search for Reinforcement Learning},
author = {Yingjie Miao and Xingyou Song and Daiyi Peng and Summer Yue and Eugene Brevdo and Aleksandra Faust},
url = {https://arxiv.org/abs/2106.02229},
eprint = {2106.02229},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Saito, Masahiko; Kishimoto, Tomoe; Kaneta, Yuya; Itoh, Taichi; Umeda, Yoshiaki; Tanaka, Junichi; Iiyama, Yutaro; Sawada, Ryu; Terashi, Koji
Event Classification with Multi-step Machine Learning Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-02301,
title = {Event Classification with Multi-step Machine Learning},
author = {Masahiko Saito and Tomoe Kishimoto and Yuya Kaneta and Taichi Itoh and Yoshiaki Umeda and Junichi Tanaka and Yutaro Iiyama and Ryu Sawada and Koji Terashi},
url = {https://arxiv.org/abs/2106.02301},
eprint = {2106.02301},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Guo, Xin; Yang, Jianlei; Zhou, Haoyi; Ye, Xucheng; Li, Jianxin
RoSearch: Search for Robust Student Architectures When Distilling Pre-trained Language Models Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-03613,
title = {{RoSearch}: Search for Robust Student Architectures When Distilling Pre-trained Language Models},
author = {Xin Guo and Jianlei Yang and Haoyi Zhou and Xucheng Ye and Jianxin Li},
url = {https://arxiv.org/abs/2106.03613},
eprint = {2106.03613},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Dey, Debadeepta; Shah, Shital; Bubeck, Sébastien
FEAR: A Simple Lightweight Method to Rank Architectures Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-04010,
title = {{FEAR}: A Simple Lightweight Method to Rank Architectures},
author = {Debadeepta Dey and Shital Shah and S{\'e}bastien Bubeck},
url = {https://arxiv.org/abs/2106.04010},
eprint = {2106.04010},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Na, Byunggook; Mok, Jisoo; Choe, Hyeokjun; Yoon, Sungroh
Accelerating Neural Architecture Search via Proxy Data Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-04784,
title = {Accelerating Neural Architecture Search via Proxy Data},
author = {Byunggook Na and Jisoo Mok and Hyeokjun Choe and Sungroh Yoon},
url = {https://arxiv.org/abs/2106.04784},
eprint = {2106.04784},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yao, Yuansheng; Liu, Risheng; Zhang, Jiaao; Zhong, Wei; Fan, Xin; Luo, Zhongxuan
Hardware-Aware Low-Light Image Enhancement via One-Shot Neural Architecture Search with Shrinkage Sampling Proceedings Article
In: 2021 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2021.
@inproceedings{9428092,
title = {Hardware-Aware Low-Light Image Enhancement via One-Shot Neural Architecture Search with Shrinkage Sampling},
author = {Yuansheng Yao and Risheng Liu and Jiaao Zhang and Wei Zhong and Xin Fan and Zhongxuan Luo},
url = {https://ieeexplore.ieee.org/abstract/document/9428092},
doi = {10.1109/ICME51207.2021.9428092},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Conference on Multimedia and Expo (ICME)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Yuxu; Zhong, Guoqiang; Wang, Yanan; Deng, Zhaoyang
Differentiable Light-Weight Architecture Search Proceedings Article
In: 2021 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2021.
@inproceedings{9428132,
title = {Differentiable Light-Weight Architecture Search},
author = {Yuxu Mao and Guoqiang Zhong and Yanan Wang and Zhaoyang Deng},
url = {https://ieeexplore.ieee.org/abstract/document/9428132},
doi = {10.1109/ICME51207.2021.9428132},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Conference on Multimedia and Expo (ICME)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Binbin; Liang, Xiaodan; Zhong, Junhao; Peng, Jiefeng; Wang, Guangrun; Lin, Liang
Unifying Dynamic Optimizer Search and Network Architecture Search Proceedings Article
In: 2021 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2021.
@inproceedings{9428169,
title = {Unifying Dynamic Optimizer Search and Network Architecture Search},
author = {Binbin Yang and Xiaodan Liang and Junhao Zhong and Jiefeng Peng and Guangrun Wang and Liang Lin},
url = {https://ieeexplore.ieee.org/abstract/document/9428169},
doi = {10.1109/ICME51207.2021.9428169},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Conference on Multimedia and Expo (ICME)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Salinas, David; Perrone, Valerio; Cruchant, Olivier; Archambeau, Cédric
A multi-objective perspective on jointly tuning hardware and hyperparameters Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-05680,
title = {A multi-objective perspective on jointly tuning hardware and hyperparameters},
author = {David Salinas and Valerio Perrone and Olivier Cruchant and C{\'e}dric Archambeau},
url = {https://arxiv.org/abs/2106.05680},
eprint = {2106.05680},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xue, Xinwei; Meng, Xiangyu; Ma, Long; Wang, Yi; Liu, Risheng; Fan, Xin
Searching Frame-Recurrent Attentive Deformable Network for Real-Time Video Deraining Technical Report
2021.
@inproceedings{9428351,
title = {Searching Frame-Recurrent Attentive Deformable Network for Real-Time Video Deraining},
author = {Xinwei Xue and Xiangyu Meng and Long Ma and Yi Wang and Risheng Liu and Xin Fan},
url = {https://ieeexplore.ieee.org/abstract/document/9428351},
doi = {10.1109/ICME51207.2021.9428351},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Conference on Multimedia and Expo (ICME)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Eltotongy, Assem; Awad, Mohammed I; Maged, Shady A; Onsy, Ahmed
Fault Detection and Classification of Machinery Bearing Under Variable Operating Conditions Based on Wavelet Transform and CNN Proceedings Article
In: 2021 International Mobile, Intelligent, and Ubiquitous Computing Conference (MIUCC), pp. 117-123, 2021.
@inproceedings{9447673,
title = {Fault Detection and Classification of Machinery Bearing Under Variable Operating Conditions Based on Wavelet Transform and {CNN}},
author = {Assem Eltotongy and Mohammed I Awad and Shady A Maged and Ahmed Onsy},
url = {https://ieeexplore.ieee.org/abstract/document/9447673},
doi = {10.1109/MIUCC52538.2021.9447673},
year = {2021},
date = {2021-01-01},
booktitle = {2021 International Mobile, Intelligent, and Ubiquitous Computing Conference (MIUCC)},
pages = {117--123},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vargas-Hákim, Gustavo A; Mezura-Montes, Efrén; Acosta-Mesa, Héctor-Gabriel
A Review on Convolutional Neural Networks Encodings for Neuroevolution Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2021.
@article{9452087,
title = {A Review on Convolutional Neural Networks Encodings for Neuroevolution},
author = {Gustavo A Vargas-H{\'a}kim and Efr{\'e}n Mezura-Montes and H{\'e}ctor-Gabriel Acosta-Mesa},
url = {https://ieeexplore.ieee.org/abstract/document/9452087},
doi = {10.1109/TEVC.2021.3088631},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Evolutionary Computation},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gu, Hongyang; Fu, Guangyuan; Wang, Xu; Zhu, Jun
Learning auto-scale representations for person re-identification Journal Article
In: Image and Vision Computing, pp. 104241, 2021, ISSN: 0262-8856.
@article{GU2021104241,
title = {Learning auto-scale representations for person re-identification},
author = {Hongyang Gu and Guangyuan Fu and Xu Wang and Jun Zhu},
url = {https://www.sciencedirect.com/science/article/pii/S0262885621001463},
doi = {10.1016/j.imavis.2021.104241},
issn = {0262-8856},
year = {2021},
date = {2021-01-01},
journal = {Image and Vision Computing},
pages = {104241},
abstract = {Person re-identification (ReID) is a hot topic in computer vision. The data in the ReID is often collected from the cameras with different views and is affected by other environmental factors, which poses a significant challenge to ReID. The omni-scales proposed by OSNet can extract discriminative feature representations, which shows that omni-scales are adequate for the task of the ReID. However, the OSNet is mainly based on a manually designed network architecture. In the OSnet, each block uses the same architecture and has only four scale feature representations. Inspired by neural network architecture search (NAS), we propose a method of auto-scale representations for ReID. Specifically, we first design the auto-scale block, mainly composed of the Lite 3 × 3 operations and the RCB 1 × 1 operations. The connection status among the Lite 3 × 3 operations is just our search space. Then we give our entire macro network architecture, the auto-scale network, which is mainly composed of 6 auto-scale blocks. Unlike other NAS-related work, each block in our search space does not need to share the same architecture but can maintain a different architecture. In the search process, we propose the entropy regularization, the validity regularization and the consistent regularization to alleviate the discretized gap, no valid path, and meaningless edges, respectively. Finally, we verify the effectiveness of the model we searched on four commonly used datasets. Our model can maintain the same 2.2 M parameters as OSNet but can achieve the performance of SOTA. The mAP on the Market1501 dataset can reach 88.7%.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Xiaojing; Guo, Min; Lyu, Qiongshuai; Ma, Miao
Detection and classification of damaged wheat kernels based on progressive neural architecture search Journal Article
In: Biosystems Engineering, vol. 208, pp. 176-185, 2021, ISSN: 1537-5110.
@article{YANG2021176,
title = {Detection and classification of damaged wheat kernels based on progressive neural architecture search},
author = {Xiaojing Yang and Min Guo and Qiongshuai Lyu and Miao Ma},
url = {https://www.sciencedirect.com/science/article/pii/S1537511021001161},
doi = {10.1016/j.biosystemseng.2021.05.016},
issn = {1537-5110},
year = {2021},
date = {2021-01-01},
journal = {Biosystems Engineering},
volume = {208},
pages = {176--185},
abstract = {Quantity and quality of grain are both closely related to national development and social stability. Grain is lost during storage due to mildew and insects. Detection of damaged grain kernels not only can reduce the loss of grain, but also protect human beings from diseases caused by damaged grain. Therefore, research on the automatic detection of damaged grain is of continued urgency. In this paper, we propose a framework combining spectrogram generative adversarial network and progressive neural architecture search (SPGAN-PNAS) to detect and classify mildew-damaged wheat kernels (MDK), insect-damaged wheat kernels (IDK) and undamaged wheat kernels (UDK). First, the spectrogram generative adversarial network (SPGAN) is designed to enlarge the data set. Second, we apply progressive neural architecture search (PNAS) to generate network structure to classify three types of wheat kernels. An F1 of 96.2% is obtained using the proposed method with 5-fold cross-validation. The results are superior to the classical neural networks for detection and classification of damaged wheat kernels. Experimental results show that the structure of SPGAN-PNAS is feasible and effective.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hu, Yufei; Belkhir, Nacim; Angulo, Jesús; Yao, Angela; Franchi, Gianni
Learning Deep Morphological Networks with Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-07714,
title = {Learning Deep Morphological Networks with Neural Architecture Search},
author = {Yufei Hu and Nacim Belkhir and Jes{\'u}s Angulo and Angela Yao and Gianni Franchi},
url = {https://arxiv.org/abs/2106.07714},
eprint = {2106.07714},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jie, Renlong; Gao, Junbin
Differentiable Neural Architecture Search with Morphism-based Transformable Backbone Architectures Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-07211,
title = {Differentiable Neural Architecture Search with Morphism-based Transformable Backbone Architectures},
author = {Renlong Jie and Junbin Gao},
url = {https://arxiv.org/abs/2106.07211},
eprint = {2106.07211},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xiang, Lichuan; Dudziak, Lukasz; Abdelfattah, Mohamed S; Chau, Thomas C P; Lane, Nicholas D; Wen, Hongkai
Zero-Cost Proxies Meet Differentiable Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-06799,
title = {Zero-Cost Proxies Meet Differentiable Architecture Search},
author = {Lichuan Xiang and Lukasz Dudziak and Mohamed S Abdelfattah and Thomas C P Chau and Nicholas D Lane and Hongkai Wen},
url = {https://arxiv.org/abs/2106.06799},
eprint = {2106.06799},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ding, Mingyu; Lian, Xiaochen; Yang, Linjie; Wang, Peng; Jin, Xiaojie; Lu, Zhiwu; Luo, Ping
HR-NAS: Searching Efficient High-Resolution Neural Architectures with Lightweight Transformers Proceedings Article
In: CVPR2021, 2021.
@inproceedings{DBLP:journals/corr/abs-2106-06560,
title = {{HR-NAS}: Searching Efficient High-Resolution Neural Architectures with Lightweight Transformers},
author = {Mingyu Ding and Xiaochen Lian and Linjie Yang and Peng Wang and Xiaojie Jin and Zhiwu Lu and Ping Luo},
url = {https://openaccess.thecvf.com/content/CVPR2021/papers/Ding_HR-NAS_Searching_Efficient_High-Resolution_Neural_Architectures_With_Lightweight_Transformers_CVPR_2021_paper.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {CVPR2021},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chuangtao; He, Zhimin; Li, Lvzhou; Zheng, Shenggen; Situ, Haozhen
Quantum Architecture Search with Meta-learning Technical Report
2021.
@techreport{chen2021quantum,
title = {Quantum Architecture Search with Meta-learning},
author = {Chuangtao Chen and Zhimin He and Lvzhou Li and Shenggen Zheng and Haozhen Situ},
url = {https://arxiv.org/abs/2106.06248},
eprint = {2106.06248},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Fu, Yonggan; Zhang, Yongan; Zhang, Yang; Cox, David; Lin, Yingyan
Auto-NBA: Efficient and Effective Search Over the Joint Space of Networks, Bitwidths, and Accelerators Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-06575,
title = {{Auto-NBA}: Efficient and Effective Search Over the Joint Space of Networks, Bitwidths, and Accelerators},
author = {Yonggan Fu and Yongan Zhang and Yang Zhang and David Cox and Yingyan Lin},
url = {https://arxiv.org/abs/2106.06575},
eprint = {2106.06575},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Akhauri, Yash; Niranjan, Adithya; Muñoz, J. Pablo; Banerjee, Suvadeep; Davare, Abhijit; Cocchini, Pasquale; Sorokin, Anton A; Iyer, Ravi; Jain, Nilesh
RHNAS: Realizable Hardware and Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-09180,
title = {{RHNAS}: Realizable Hardware and Neural Architecture Search},
author = {Yash Akhauri and Adithya Niranjan and J. Pablo Mu{\~n}oz and Suvadeep Banerjee and Abhijit Davare and Pasquale Cocchini and Anton A Sorokin and Ravi Iyer and Nilesh Jain},
url = {https://arxiv.org/abs/2106.09180},
eprint = {2106.09180},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Garciarena, Unai; Santana, Roberto; Mendiburu, Alexander
Redefining Neural Architecture Search of Heterogeneous Multi-Network Models by Characterizing Variation Operators and Model Components Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-08972,
title = {Redefining Neural Architecture Search of Heterogeneous Multi-Network Models by Characterizing Variation Operators and Model Components},
author = {Unai Garciarena and Roberto Santana and Alexander Mendiburu},
url = {https://arxiv.org/abs/2106.08972},
eprint = {2106.08972},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Hanjun; Wu, Gaojie; Zheng, Wei-Shi
Combined Depth Space based Architecture Search For Person Re-identification Book Section
In: CVPR2021, vol. abs/2104.04163, 2021.
@inproceedings{DBLP:journals/corr/abs-2104-04163,
title = {Combined Depth Space based Architecture Search For Person Re-identification},
author = {Hanjun Li and Gaojie Wu and Wei-Shi Zheng},
url = {https://arxiv.org/abs/2104.04163},
eprint = {2104.04163},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
booktitle = {CVPR2021},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Hayeon; Lee, Sewoong; Chong, Song; Hwang, Sung Ju
HELP: Hardware-Adaptive Efficient Latency Predictor for NAS via Meta-Learning Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-08630,
title = {{HELP}: Hardware-Adaptive Efficient Latency Predictor for {NAS} via Meta-Learning},
author = {Hayeon Lee and Sewoong Lee and Song Chong and Sung Ju Hwang},
url = {https://arxiv.org/abs/2106.08630},
eprint = {2106.08630},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chen, Weiwei; Wang, Ying; Lin, Gangliang; Gao, Chengsi; Liu, Cheng; Zhang, Lei
CHaNAS: Coordinated Search for Network Architecture and Scheduling Policy Proceedings Article
In: Proceedings of the 22nd ACM SIGPLAN/SIGBED International Conference on Languages, Compilers, and Tools for Embedded Systems, pp. 42–53, Association for Computing Machinery, Virtual, Canada, 2021, ISBN: 9781450384728.
@inproceedings{10.1145/3461648.3463846,
title = {{CHaNAS}: Coordinated Search for Network Architecture and Scheduling Policy},
author = {Weiwei Chen and Ying Wang and Gangliang Lin and Chengsi Gao and Cheng Liu and Lei Zhang},
url = {https://doi.org/10.1145/3461648.3463846},
doi = {10.1145/3461648.3463846},
isbn = {9781450384728},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 22nd ACM SIGPLAN/SIGBED International Conference on Languages, Compilers, and Tools for Embedded Systems},
pages = {42--53},
publisher = {Association for Computing Machinery},
address = {Virtual, Canada},
series = {LCTES 2021},
abstract = {Automatically design an efficient DNN solution for a given deep learning task on the
target hardware mainly decided by the neural network architecture and the schedule
mapping strategy, where the two goals are closely coupled with each other to fully
exploit the advantages of the underlying hardware. Prior hardware-aware Neural Architecture
Search (NAS) methods mostly ignore the impacts of different scheduling policies (e.g.,
graph-level optimization, loop transformations, parallelization, etc.) on network
candidates being evaluated in the search process. Thus, they may miss the true-optimal
architecture that can only be discovered by trying-out different scheduling policies.
This work proposes a NAS framework (CHaNAS) that searches for not only the network
architecture but also the dedicated scheduling policy, as the optimal co-design solution
on target hardware that fully exploits the advantages of the underlying hardware.
We propose to use a block-based pre-scheduling methodology to reduce the co-design
search space, and enable the automatic generation of the optimal co-design, including
the network architecture and the tensor programs that practice the scheduling policy.
We evaluate CHaNAS on Imagenet on different hardware back-ends against the state-of-the-art
hardware-aware search method MobileNet-v3. Experimental results show that the co-design
solutions obtained by ChaNAS show up to 1.6x, 1.9x, and 1.7x performance boost on
NVIDIA P100 GPU, Intel Xeon 8163 CPU, and Samsung Note 10 Mobile, respectively, over
the baselines of the same-level accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
target hardware mainly decided by the neural network architecture and the schedule
mapping strategy, where the two goals are closely coupled with each other to fully
exploit the advantages of the underlying hardware. Prior hardware-aware Neural Architecture
Search (NAS) methods mostly ignore the impacts of different scheduling policies (e.g.,
graph-level optimization, loop transformations, parallelization, etc.) on network
candidates being evaluated in the search process. Thus, they may miss the true-optimal
architecture that can only be discovered by trying-out different scheduling policies.
This work proposes a NAS framework (CHaNAS) that searches for not only the network
architecture but also the dedicated scheduling policy, as the optimal co-design solution
on target hardware that fully exploits the advantages of the underlying hardware.
We propose to use a block-based pre-scheduling methodology to reduce the co-design
search space, and enable the automatic generation of the optimal co-design, including
the network architecture and the tensor programs that practice the scheduling policy.
We evaluate CHaNAS on Imagenet on different hardware back-ends against the state-of-the-art
hardware-aware search method MobileNet-v3. Experimental results show that the co-design
solutions obtained by ChaNAS show up to 1.6x, 1.9x, and 1.7x performance boost on
NVIDIA P100 GPU, Intel Xeon 8163 CPU, and Samsung Note 10 Mobile, respectively, over
the baselines of the same-level accuracy.
Manu, Daniel; Huang, Shaoyi; Ding, Caiwen; Yang, Lei
Co-Exploration of Graph Neural Network and Network-on-Chip Design Using AutoML Proceedings Article
In: Proceedings of the 2021 on Great Lakes Symposium on VLSI, pp. 175–180, Association for Computing Machinery, Virtual Event, USA, 2021, ISBN: 9781450383936.
@inproceedings{10.1145/3453688.3461741,
title = {Co-Exploration of Graph Neural Network and Network-on-Chip Design Using AutoML},
author = {Daniel Manu and Shaoyi Huang and Caiwen Ding and Lei Yang},
url = {https://doi.org/10.1145/3453688.3461741},
doi = {10.1145/3453688.3461741},
isbn = {9781450383936},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 2021 on Great Lakes Symposium on VLSI},
pages = {175--180},
publisher = {Association for Computing Machinery},
address = {Virtual Event, USA},
series = {GLSVLSI '21},
abstract = {Recently, Graph Neural Networks (GNNs) have exhibited high efficiency in several graph-based
machine learning tasks. Compared with the neural networks for computer vision or speech
tasks (e.g., Convolutional Neural Networks), GNNs have much higher requirements on
communication due to the complicated graph structures; however, when applying GNNs
for real-world applications, say in recommender systems (e.g. Uber Eats), it commonly
has the real-time requirements. To deal with the tradeoff between the complicated
architecture and the high-demand timing performance, both GNN architecture and hardware
accelerator need to be optimized. Network-on-Chip (NoC), derived for efficiently managing
the high-volume of communications, naturally becomes one of the top candidates to
accelerate GNNs. However, there is a missing link between the optimization of GNN architecture
and the NoC design. In this work, we present an AutoML-based framework GN-NAS, aiming
at searching for the optimum GNN architecture, which can be suitable for the NoC accelerator.
We devise a robust reinforcement learning based controller to validate the retained
best GNN architectures, coupled with a parameter sharing approach, namely ParamShare,
to improve search efficiency. Experimental results on four graph-based benchmark datasets,
Cora, Citeseer, Pubmed and Protein-Protein Interaction show that the GNN architectures
obtained by our framework outperform that of the state-of-the-art and baseline models,
whilst reducing model size which makes them easy to deploy onto the NoC platform.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
machine learning tasks. Compared with the neural networks for computer vision or speech
tasks (e.g., Convolutional Neural Networks), GNNs have much higher requirements on
communication due to the complicated graph structures; however, when applying GNNs
for real-world applications, say in recommender systems (e.g. Uber Eats), it commonly
has the real-time requirements. To deal with the tradeoff between the complicated
architecture and the high-demand timing performance, both GNN architecture and hardware
accelerator need to be optimized. Network-on-Chip (NoC), derived for efficiently managing
the high-volume of communications, naturally becomes one of the top candidates to
accelerate GNNs. However, there is a missing link between the optimization of GNN architecture
and the NoC design.In this work, we present an AutoML-based framework GN-NAS, aiming
at searching for the optimum GNN architecture, which can be suitable for the NoC accelerator.
We devise a robust reinforcement learning based controller to validate the retained
best GNN architectures, coupled with a parameter sharing approach, namely ParamShare,
to improve search efficiency. Experimental results on four graph-based benchmark datasets,
Cora, Citeseer, Pubmed and Protein-Protein Interaction show that the GNN architectures
obtained by our framework outperform that of the state-of-the-art and baseline models,
whilst reducing model size which makes them easy to deploy onto the NoC platform.
Li, Yuening; Chen, Zhengzhang; Zha, Daochen; Zhou, Kaixiong; Jin, Haifeng; Chen, Haifeng; Hu, Xia
AutoOD: Neural Architecture Search for Outlier Detection Proceedings Article
In: 2021 IEEE 37th International Conference on Data Engineering (ICDE), pp. 2117-2122, 2021.
@inproceedings{9458691,
title = {AutoOD: Neural Architecture Search for Outlier Detection},
author = {Yuening Li and Zhengzhang Chen and Daochen Zha and Kaixiong Zhou and Haifeng Jin and Haifeng Chen and Xia Hu},
url = {https://ieeexplore.ieee.org/abstract/document/9458691},
doi = {10.1109/ICDE51399.2021.00210},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE)},
pages = {2117--2122},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azarmehr, Neda; Ye, Xujiong; Howard, James P; Lane, Elisabeth S; Labs, Robert; Shun-Shin, Matthew J; Cole, Graham D; Bidaut, Luc; Francis, Darrel P; Zolgharni, Massoud
Neural architecture search of echocardiography view classifiers Journal Article
In: Journal of Medical Imaging, vol. 8, no. 3, pp. 1 – 21, 2021.
@article{10.1117/1.JMI.8.3.034002,
title = {Neural architecture search of echocardiography view classifiers},
author = {Neda Azarmehr and Xujiong Ye and James P Howard and Elisabeth S Lane and Robert Labs and Matthew J Shun-Shin and Graham D Cole and Luc Bidaut and Darrel P Francis and Massoud Zolgharni},
url = {https://doi.org/10.1117/1.JMI.8.3.034002},
doi = {10.1117/1.JMI.8.3.034002},
year = {2021},
date = {2021-01-01},
journal = {Journal of Medical Imaging},
volume = {8},
number = {3},
pages = {1--21},
publisher = {SPIE},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Maile, Kaitlin; Lecarpentier, Erwan; Luga, Hervé; Wilson, Dennis G
On Constrained Optimization in Differentiable Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-11655,
title = {On Constrained Optimization in Differentiable Neural Architecture Search},
author = {Kaitlin Maile and Erwan Lecarpentier and Herv{\'e} Luga and Dennis G Wilson},
url = {https://arxiv.org/abs/2106.11655},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2106.11655},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Santra, Santanu; Hsieh, Jun-Wei; Lin, Chi-Fang
Gradient Descent Effects on Differential Neural Architecture Search: A Survey Journal Article
In: IEEE Access, vol. 9, pp. 89602-89618, 2021.
@article{9461192,
title = {Gradient Descent Effects on Differential Neural Architecture Search: A Survey},
author = {Santanu Santra and Jun-Wei Hsieh and Chi-Fang Lin},
url = {https://ieeexplore.ieee.org/abstract/document/9461192},
doi = {10.1109/ACCESS.2021.3090918},
year = {2021},
date = {2021-01-01},
journal = {IEEE Access},
volume = {9},
pages = {89602--89618},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zeinali, Behnam; Zhuang, Di; Chang, Morris J
ESAI: Efficient Split Artificial Intelligence via Early Exiting Using Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-12549,
  title     = {ESAI: Efficient Split Artificial Intelligence via Early Exiting Using Neural Architecture Search},
  author    = {Behnam Zeinali and Di Zhuang and Morris J Chang},
  url       = {https://arxiv.org/abs/2106.12549},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {CoRR},
  volume    = {abs/2106.12549},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Eriksson, David; Chuang, Pierce I-Jen; Daulton, Samuel; Xia, Peng; Shrivastava, Akshat; Babu, Arun; Zhao, Shicong; Aly, Ahmed; Venkatesh, Ganesh; Balandat, Maximilian
Latency-Aware Neural Architecture Search with Multi-Objective Bayesian Optimization Proceedings Article
In: 8th ICML Workshop on Automated Machine Learning (2021), 2021.
@inproceedings{DBLP:journals/corr/abs-2106-11890,
title = {Latency-Aware Neural Architecture Search with Multi-Objective Bayesian Optimization},
author = {David Eriksson and Pierce I-Jen Chuang and Samuel Daulton and Peng Xia and Akshat Shrivastava and Arun Babu and Shicong Zhao and Ahmed Aly and Ganesh Venkatesh and Maximilian Balandat},
url = {https://arxiv.org/abs/2106.11890},
year = {2021},
date = {2021-01-01},
booktitle = {8th ICML Workshop on Automated Machine Learning (2021)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Klos, Andreas; Rosenbaum, Marius; Schiffmann, Wolfram
Scalable and Highly Available Multi-Objective Neural Architecture Search in Bare Metal Kubernetes Cluster Proceedings Article
In: 2021 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), pp. 605-610, 2021.
@inproceedings{9460405,
title = {Scalable and Highly Available Multi-Objective Neural Architecture Search in Bare Metal Kubernetes Cluster},
author = {Andreas Klos and Marius Rosenbaum and Wolfram Schiffmann},
url = {https://ieeexplore.ieee.org/abstract/document/9460405},
doi = {10.1109/IPDPSW52791.2021.00094},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)},
pages = {605--610},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Miao; Su, Steven W; Pan, Shirui; Chang, Xiaojun; Huang, Wei; Haffari, Gholamreza
Differentiable Architecture Search Without Training Nor Labels: A Pruning Perspective Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-11542,
  title     = {Differentiable Architecture Search Without Training Nor Labels: A Pruning Perspective},
  author    = {Miao Zhang and Steven W Su and Shirui Pan and Xiaojun Chang and Wei Huang and Gholamreza Haffari},
  url       = {https://arxiv.org/abs/2106.11542},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {CoRR},
  volume    = {abs/2106.11542},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Nunes, Matheus; Fraga, Paulo M; Pappa, Gisele L
Fitness Landscape Analysis of Graph Neural Network Architecture Search Spaces Proceedings Article
In: Proceedings of the Genetic and Evolutionary Computation Conference, pp. 876–884, Association for Computing Machinery, Lille, France, 2021, ISBN: 9781450383509.
@inproceedings{10.1145/3449639.3459318,
title = {Fitness Landscape Analysis of Graph Neural Network Architecture Search Spaces},
author = {Matheus Nunes and Paulo M Fraga and Gisele L Pappa},
url = {https://doi.org/10.1145/3449639.3459318},
doi = {10.1145/3449639.3459318},
isbn = {9781450383509},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference},
pages = {876--884},
publisher = {Association for Computing Machinery},
address = {Lille, France},
series = {GECCO '21},
abstract = {Neural Architecture Search (NAS) is the name given to a set of methods designed to
automatically configure the layout of neural networks. Their success on Convolutional
Neural Networks inspired its use on optimizing other types of neural network architectures,
including Graph Neural Networks (GNNs). GNNs have been extensively applied over several
collections of real-world data, achieving state-of-the-art results in tasks such as
circuit design, molecular structure generation and anomaly detection. Many GNN models
have been recently proposed, and choosing the best model for each problem has become
a cumbersome and error-prone task. Aiming to alleviate this problem, recent works
have proposed strategies for applying NAS to GNN models. However, different search
methods converge relatively fast in the search for a good architecture, which raises
questions about the structure of the problem. In this work we use Fitness Landscape
Analysis (FLA) measures to characterize the search space explored by NAS methods for
GNNs. We sample almost 90k different architectures that cover most of the fitness
range, and represent them using both a one-hot encoding and an embedding representation.
Results of the fitness distance correlation and dispersion metrics show the fitness
landscape is easy to be explored, and presents low neutrality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
automatically configure the layout of neural networks. Their success on Convolutional
Neural Networks inspired its use on optimizing other types of neural network architectures,
including Graph Neural Networks (GNNs). GNNs have been extensively applied over several
collections of real-world data, achieving state-of-the-art results in tasks such as
circuit design, molecular structure generation and anomaly detection. Many GNN models
have been recently proposed, and choosing the best model for each problem has become
a cumbersome and error-prone task. Aiming to alleviate this problem, recent works
have proposed strategies for applying NAS to GNN models. However, different search
methods converge relatively fast in the search for a good architecture, which raises
questions about the structure of the problem. In this work we use Fitness Landscape
Analysis (FLA) measures to characterize the search space explored by NAS methods for
GNNs. We sample almost 90k different architectures that cover most of the fitness
range, and represent them using both a one-hot encoding and an embedding representation.
Results of the fitness distance correlation and dispersion metrics show the fitness
landscape is easy to be explored, and presents low neutrality.
Li, Boyang; Lu, Qing; Jiang, Weiwen; Jung, Taeho; Shi, Yiyu
A mining pool solution for novel proof-of-neural-architecture consensus Proceedings Article
In: 2021 IEEE International Conference on Blockchain and Cryptocurrency (ICBC), pp. 1-3, 2021.
@inproceedings{9461067,
title = {A mining pool solution for novel proof-of-neural-architecture consensus},
author = {Boyang Li and Qing Lu and Weiwen Jiang and Taeho Jung and Yiyu Shi},
doi = {10.1109/ICBC51069.2021.9461067},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE International Conference on Blockchain and Cryptocurrency (ICBC)},
pages = {1--3},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Miao; Su, Steven W; Pan, Shirui; Chang, Xiaojun; Abbasnejad, Ehsan M; Haffari, Gholamreza
iDARTS: Differentiable Architecture Search with Stochastic Implicit Gradients Proceedings Article
In: Meila, Marina; Zhang, Tong (Ed.): Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pp. 12557–12566, PMLR, 2021.
@inproceedings{DBLP:conf/icml/ZhangSPCAH21,
title = {iDARTS: Differentiable Architecture Search with Stochastic Implicit Gradients},
author = {Miao Zhang and Steven W Su and Shirui Pan and Xiaojun Chang and Ehsan M Abbasnejad and Gholamreza Haffari},
editor = {Marina Meila and Tong Zhang},
url = {http://proceedings.mlr.press/v139/zhang21s.html},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event},
volume = {139},
pages = {12557--12566},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
ZHAO, Huan; YAO, Quanming; TU, Weiwei
Search to aggregate neighborhood for graph neural network Proceedings Article
In: 2021 IEEE 37th International Conference on Data Engineering (ICDE), pp. 552-563, 2021.
@inproceedings{9458743,
title = {Search to aggregate neighborhood for graph neural network},
author = {Huan Zhao and Quanming Yao and Weiwei Tu},
url = {https://ieeexplore.ieee.org/abstract/document/9458743},
doi = {10.1109/ICDE51399.2021.00054},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE 37th International Conference on Data Engineering (ICDE)},
pages = {552--563},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Deng, Xueqing; Zhu, Yi; Tian, Yuxin; Newsam, Shawn D
AutoAdapt: Automated Segmentation Network Search for Unsupervised Domain Adaptation Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2106-13227,
  title     = {AutoAdapt: Automated Segmentation Network Search for Unsupervised Domain Adaptation},
  author    = {Xueqing Deng and Yi Zhu and Yuxin Tian and Shawn D Newsam},
  url       = {https://arxiv.org/abs/2106.13227},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {CoRR},
  volume    = {abs/2106.13227},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}