Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2021
Abdelfattah, Mohamed S; Mehrotra, Abhinav; Dudziak, Lukasz; Lane, Nicholas D
Zero-Cost Proxies for Lightweight NAS Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-08134,
  title      = {Zero-Cost Proxies for Lightweight {NAS}},
  author     = {Mohamed S. Abdelfattah and Abhinav Mehrotra and Lukasz Dudziak and Nicholas D. Lane},
  url        = {https://arxiv.org/abs/2101.08134},
  eprint     = {2101.08134},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.08134},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Benmeziane, Hadjer; Maghraoui, Kaoutar El; Ouarnoughi, Hamza; Niar, Smaïl; Wistuba, Martin; Wang, Naigang
A Comprehensive Survey on Hardware-Aware Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-09336,
  title      = {A Comprehensive Survey on Hardware-Aware Neural Architecture Search},
  author     = {Hadjer Benmeziane and Kaoutar El Maghraoui and Hamza Ouarnoughi and Smaïl Niar and Martin Wistuba and Naigang Wang},
  url        = {https://arxiv.org/abs/2101.09336},
  eprint     = {2101.09336},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.09336},
  pubstate   = {published},
  tppubtype  = {techreport}
}
He, Xin; Wang, Shihao; Ying, Guohao; Zhang, Jiyong; Chu, Xiaowen
Efficient Multi-objective Evolutionary 3D Neural Architecture Search for COVID-19 Detection with Chest CT Scans Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-10667,
  title      = {Efficient Multi-objective Evolutionary {3D} Neural Architecture Search for {COVID-19} Detection with Chest {CT} Scans},
  author     = {Xin He and Shihao Wang and Guohao Ying and Jiyong Zhang and Xiaowen Chu},
  url        = {https://arxiv.org/abs/2101.10667},
  eprint     = {2101.10667},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.10667},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Zhao, Jiakun; Zhang, Ruifeng; Zhou, Zheng; Chen, Si; Jin, Ju; Liu, Qingfang
A Neural Architecture Search Method Based on Gradient Descent for Remaining Useful Life Estimation Journal Article
In: Neurocomputing, 2021, ISSN: 0925-2312.
@article{ZHAO2021,
  title     = {A Neural Architecture Search Method Based on Gradient Descent for Remaining Useful Life Estimation},
  author    = {Jiakun Zhao and Ruifeng Zhang and Zheng Zhou and Si Chen and Ju Jin and Qingfang Liu},
  url       = {https://www.sciencedirect.com/science/article/pii/S092523122100148X},
  doi       = {10.1016/j.neucom.2021.01.072},
  issn      = {0925-2312},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Neurocomputing},
  abstract  = {Remaining useful life is the estimated continuous normal working time of a component or system from the current moment to the potential failure. The traditional methods have high trial-and-error costs and poor migration capabilities. Fortunately, the neural architecture search (NAS) that has emerged partially solves the problem of automatic construction of network models. However, the search strategy for NAS is reinforcement learning or evolutionary algorithms, which essentially search in discrete space and treating the objective function as a black box, which is very time-consuming. To solve this problem, we proposed a gradient-based neural architecture search method. This method regards a cell in the search space as a directed acyclic graph (DAG) containing N ordered nodes. Each node is a latent representation, and the directed edges represent the conversion operation of two nodes. By mixing the candidate operations (ReLU, tanh) with the softmax function, the search space becomes a continuous space and the objective function becomes a differentiable function, so gradient-based optimization methods can be used to find the optimal structure. A neural architecture search method based on gradient descent for RUL estimation, with extensive experiments showing apparently, outperforms traditional approaches as well as Long Short-Term Memory (LSTM), and it takes much less computing resources than the reinforcement neural architecture search method.},
  pubstate  = {published},
  tppubtype = {article}
}
Kertész, Gábor; Szénási, Sándor; Vámossy, Zoltán
Comparative analysis of image projection-based descriptors in Siamese neural networks Journal Article
In: Advances in Engineering Software, vol. 154, pp. 102963, 2021, ISSN: 0965-9978.
@article{KERTESZ2021102963,
  title     = {Comparative analysis of image projection-based descriptors in {Siamese} neural networks},
  author    = {Gábor Kertész and Sándor Szénási and Zoltán Vámossy},
  url       = {https://www.sciencedirect.com/science/article/pii/S0965997820310097},
  doi       = {10.1016/j.advengsoft.2020.102963},
  issn      = {0965-9978},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Advances in Engineering Software},
  volume    = {154},
  pages     = {102963},
  abstract  = {Low-level object matching can be done using projection signatures. In case of a large number of projections, the matching algorithm has to deal with less significant slices. A trivial approach would be to do statistical analysis or apply machine learning to determine the significant features. To take adjacent values of the projection matrices into account, a convolutional neural network should be used. To compare two matrices, a Siamese-structure of convolutional heads can be applied. In this paper, an experiment is designed and implemented to analyze the object matching performance of Siamese Convolutional Neural Networks based on multi-directional image projection data. A backtracking search-based Neural Architecture Generation method is used to create convolutional architectures, and a Master/Worker structured distributed processing with highly efficient scheduling based on the Longest Processing Times-heuristics is used for parallel training and evaluation of the models. Results show that the projection-based methods are Pareto optimal in terms of one-shot classification accuracy and memory consumption.},
  pubstate  = {published},
  tppubtype = {article}
}
Zhang, Xuanyang; Hou, Pengfei; Zhang, Xiangyu; Sun, Jian
Neural Architecture Search with Random Labels Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-11834,
  title      = {Neural Architecture Search with Random Labels},
  author     = {Xuanyang Zhang and Pengfei Hou and Xiangyu Zhang and Jian Sun},
  url        = {https://arxiv.org/abs/2101.11834},
  eprint     = {2101.11834},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.11834},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Liang, Xinle; Liu, Yang; Luo, Jiahuan; He, Yuanqin; Chen, Tianjian; Yang, Qiang
Self-supervised Cross-silo Federated Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-11896,
  title      = {Self-supervised Cross-silo Federated Neural Architecture Search},
  author     = {Xinle Liang and Yang Liu and Jiahuan Luo and Yuanqin He and Tianjian Chen and Qiang Yang},
  url        = {https://arxiv.org/abs/2101.11896},
  eprint     = {2101.11896},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.11896},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Yang, Yibo; You, Shan; Li, Hongyang; Wang, Fei; Qian, Chen; Lin, Zhouchen
Towards Improving the Consistency, Efficiency, and Flexibility of Differentiable Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-11342,
  title      = {Towards Improving the Consistency, Efficiency, and Flexibility of Differentiable Neural Architecture Search},
  author     = {Yibo Yang and Shan You and Hongyang Li and Fei Wang and Chen Qian and Zhouchen Lin},
  url        = {https://arxiv.org/abs/2101.11342},
  eprint     = {2101.11342},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.11342},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Lu, Longfei; Lyu, Bo
Reducing energy consumption of Neural Architecture Search: An inference latency prediction framework Journal Article
In: Sustainable Cities and Society, vol. 67, pp. 102747, 2021, ISSN: 2210-6707.
@article{LU2021102747,
  title     = {Reducing energy consumption of Neural Architecture Search: An inference latency prediction framework},
  author    = {Longfei Lu and Bo Lyu},
  url       = {https://www.sciencedirect.com/science/article/pii/S221067072100041X},
  doi       = {10.1016/j.scs.2021.102747},
  issn      = {2210-6707},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Sustainable Cities and Society},
  volume    = {67},
  pages     = {102747},
  abstract  = {Benefit from the success of NAS (Neural Architecture Search) in deep learning, humans are hopefully been released from the tremendous labor of manual tuning of structure and hyper-parameters. However, the success of NAS comes at the cost of much more computational resource consumption, thousands of times more computational power than ordinary training of manual-designed models, especially for the resource-aware multi-objective NAS, which must be serialized as a sequential loop of sampling, training, deployment, and inference. Recent research has shown that deep learning leads to huge energy consumption and CO2 emission (training of the namely Transformer can emit CO2 as much as five cars in their lifetimes Strubell et al. (2019)). Aiming to alleviate this issue, we propose the end-to-end inference latency prediction framework to empower the NAS process with a direct resource-aware efficiency indicator. Namely, we first propose the end-to-end latency prediction framework, which can predict latency quickly and accurately based on the dataset collected by ourselves. Eventually, we experimentally show that with the encoding scheme we designed, our proposed best model, LSTM-GBDT Latency Predictor(LGLP) achieves an excellent result of 0.9349 MSE, 0.5249 MAE, 0.9842 R2, and 0.9925 corrcoef. In other words, our limited dataset and encoding scheme already provide the precise knowledge representation of this large search space. By equipping NAS with the proposed framework, taking NEMO for example, it will save 1588 kWh⋅PUE energy, 1515 pounds CO2 emissions, and $3176 cloud compute cost of AWS. For NAS is now widely exploited in research or industry applications, this will bring incalculable benefits to society and the environment.},
  pubstate  = {published},
  tppubtype = {article}
}
Pinos, Michal; Mrazek, Vojtech; Sekanina, Lukáš
Evolutionary Neural Architecture Search Supporting Approximate Multipliers Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2101-11883,
  title      = {Evolutionary Neural Architecture Search Supporting Approximate Multipliers},
  author     = {Michal Pinos and Vojtech Mrazek and Lukáš Sekanina},
  url        = {https://arxiv.org/abs/2101.11883},
  eprint     = {2101.11883},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2101.11883},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Lyu, B; Yuan, H; Lu, L; Zhang, Y
Resource-constrained Neural Architecture Search on Edge Devices Journal Article
In: IEEE Transactions on Network Science and Engineering, pp. 1-1, 2021.
@article{9336306,
  title     = {Resource-constrained Neural Architecture Search on Edge Devices},
  author    = {B. Lyu and H. Yuan and L. Lu and Y. Zhang},
  url       = {https://ieeexplore.ieee.org/document/9336306},
  doi       = {10.1109/TNSE.2021.3054583},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {IEEE Transactions on Network Science and Engineering},
  pages     = {1--1},
  pubstate  = {published},
  tppubtype = {article}
}
Gomez-Rosero, Santiago; Capretz, Miriam A M; Mir, Syed
Transfer Learning by Similarity Centred Architecture Evolution for Multiple Residential Load Forecasting Journal Article
In: Smart Cities, vol. 4, no. 1, pp. 217–240, 2021, ISSN: 2624-6511.
@article{smartcities4010014,
  title     = {Transfer Learning by Similarity Centred Architecture Evolution for Multiple Residential Load Forecasting},
  author    = {Santiago Gomez-Rosero and Miriam A. M. Capretz and Syed Mir},
  url       = {https://www.mdpi.com/2624-6511/4/1/14},
  doi       = {10.3390/smartcities4010014},
  issn      = {2624-6511},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Smart Cities},
  volume    = {4},
  number    = {1},
  pages     = {217--240},
  abstract  = {The development from traditional low voltage grids to smart systems has become extensive and adopted worldwide. Expanding the demand response program to cover the residential sector raises a wide range of challenges. Short term load forecasting for residential consumers in a neighbourhood could lead to a better understanding of low voltage consumption behaviour. Nevertheless, users with similar characteristics can present diversity in consumption patterns. Consequently, transfer learning methods have become a useful tool to tackle differences among residential time series. This paper proposes a method combining evolutionary algorithms for neural architecture search with transfer learning to perform short term load forecasting in a neighbourhood with multiple household load consumption. The approach centres its efforts on neural architecture search using evolutionary algorithms. The neural architecture evolution process retains the patterns of the centre-most house, and later the architecture weights are adjusted for each house in a multihouse set from a neighbourhood. In addition, a sensitivity analysis was conducted to ensure model performance. Experimental results on a large dataset containing hourly load consumption for ten houses in London, Ontario showed that the performance of the proposed approach performs better than the compared techniques. Moreover, the proposed method presents the average accuracy performance of 3.17 points higher than the state-of-the-art LSTM one shot method.},
  pubstate  = {published},
  tppubtype = {article}
}
Lin, Ming; Wang, Pichao; Sun, Zhenhong; Chen, Hesen; Sun, Xiuyu; Qian, Qi; Li, Hao; Jin, Rong
Zen-NAS: A Zero-Shot NAS for High-Performance Deep Image Recognition Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-01063,
  title      = {{Zen-NAS}: A Zero-Shot {NAS} for High-Performance Deep Image Recognition},
  author     = {Ming Lin and Pichao Wang and Zhenhong Sun and Hesen Chen and Xiuyu Sun and Qi Qian and Hao Li and Rong Jin},
  url        = {https://arxiv.org/abs/2102.01063},
  eprint     = {2102.01063},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2102.01063},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Odema, Mohanad; Rashid, Nafiul; Faruque, Mohammad Abdullah Al
Energy-Aware Design Methodology for Myocardial Infarction Detection on Low-Power Wearable Devices Proceedings Article
In: Proceedings of the 26th Asia and South Pacific Design Automation Conference, pp. 621–626, Association for Computing Machinery, Tokyo, Japan, 2021, ISBN: 9781450379991.
@inproceedings{10.1145/3394885.3431513,
  title     = {Energy-Aware Design Methodology for Myocardial Infarction Detection on Low-Power Wearable Devices},
  author    = {Mohanad Odema and Nafiul Rashid and Mohammad Abdullah Al Faruque},
  url       = {https://doi.org/10.1145/3394885.3431513},
  doi       = {10.1145/3394885.3431513},
  isbn      = {9781450379991},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings of the 26th Asia and South Pacific Design Automation Conference},
  pages     = {621--626},
  publisher = {Association for Computing Machinery},
  address   = {Tokyo, Japan},
  series    = {ASPDAC '21},
  abstract  = {Myocardial Infarction (MI) is a heart disease that damages the heart muscle and requires immediate treatment. Its silent and recurrent nature necessitates real-time continuous monitoring of patients. Nowadays, wearable devices are smart enough to perform on-device processing of heartbeat segments and report any irregularities in them. However, the small form factor of wearable devices imposes resource constraints and requires energy-efficient solutions to satisfy them. In this paper, we propose a design methodology to automate the design space exploration of neural network architectures for MI detection. This methodology incorporates Neural Architecture Search (NAS) using Multi-Objective Bayesian Optimization (MOBO) to render Pareto optimal architectural models. These models minimize both detection error and energy consumption on the target device. The design space is inspired by Binary Convolutional Neural Networks (BCNNs) suited for mobile health applications with limited resources. The models' performance is validated using the PTB diagnostic ECG database from PhysioNet. Moreover, energy-related measurements are directly obtained from the target device in a typical hardware-in-the-loop fashion. Finally, we benchmark our models against other related works. One model exceeds state-of-the-art accuracy on wearable devices (reaching 91.22%), whereas others trade off some accuracy to reduce their energy consumption (by a factor reaching 8.26x).},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liang, Shuang; Tang, Changcheng; Ning, Xuefei; Zeng, Shulin; Yu, Jincheng; Wang, Yu; Guo, Kaiyuan; Yang, Diange; Lu, Tianyi; Yang, Huazhong
Efficient Computing Platform Design for Autonomous Driving Systems Proceedings Article
In: Proceedings of the 26th Asia and South Pacific Design Automation Conference, pp. 734–741, Association for Computing Machinery, Tokyo, Japan, 2021, ISBN: 9781450379991.
@inproceedings{10.1145/3394885.3431620,
  title     = {Efficient Computing Platform Design for Autonomous Driving Systems},
  author    = {Shuang Liang and Changcheng Tang and Xuefei Ning and Shulin Zeng and Jincheng Yu and Yu Wang and Kaiyuan Guo and Diange Yang and Tianyi Lu and Huazhong Yang},
  url       = {https://doi.org/10.1145/3394885.3431620},
  doi       = {10.1145/3394885.3431620},
  isbn      = {9781450379991},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings of the 26th Asia and South Pacific Design Automation Conference},
  pages     = {734--741},
  publisher = {Association for Computing Machinery},
  address   = {Tokyo, Japan},
  series    = {ASPDAC '21},
  abstract  = {Autonomous driving is becoming a hot topic in both academic and industrial communities. Traditional algorithms can hardly achieve the complex tasks and meet the high safety criteria. Recent research on deep learning shows significant performance improvement over traditional algorithms and is believed to be a strong candidate in autonomous driving system. Despite the attractive performance, deep learning does not solve the problem totally. The application scenario requires that an autonomous driving system must work in real-time to keep safety. But the high computation complexity of neural network model, together with complicated pre-process and post-process, brings great challenges. System designers need to do dedicated optimizations to make a practical computing platform for autonomous driving. In this paper, we introduce our work on efficient computing platform design for autonomous driving systems. In the software level, we introduce neural network compression and hardware-aware architecture search to reduce the workload. In the hardware level, we propose customized hardware accelerators for pre- and post-process of deep learning algorithms. Finally, we introduce the hardware platform design, NOVA-30, and our on-vehicle evaluation project.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jie, R; Gao, J
Differentiable Neural Architecture Search for High-Dimensional Time Series Forecasting Journal Article
In: IEEE Access, vol. 9, pp. 20922-20932, 2021.
@article{9340253,
  title     = {Differentiable Neural Architecture Search for High-Dimensional Time Series Forecasting},
  author    = {R. Jie and J. Gao},
  url       = {https://ieeexplore.ieee.org/document/9340253},
  doi       = {10.1109/ACCESS.2021.3055555},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {IEEE Access},
  volume    = {9},
  pages     = {20922--20932},
  pubstate  = {published},
  tppubtype = {article}
}
Lang, Sebastian; Reggelin, Tobias; Schmidt, Johann; Müller, Marcel; Nahhas, Abdulrahman
NeuroEvolution of augmenting topologies for solving a two-stage hybrid flow shop scheduling problem: A comparison of different solution strategies Journal Article
In: Expert Systems with Applications, vol. 172, pp. 114666, 2021, ISSN: 0957-4174.
@article{LANG2021114666,
  title     = {{NeuroEvolution} of augmenting topologies for solving a two-stage hybrid flow shop scheduling problem: A comparison of different solution strategies},
  author    = {Sebastian Lang and Tobias Reggelin and Johann Schmidt and Marcel Müller and Abdulrahman Nahhas},
  url       = {https://www.sciencedirect.com/science/article/pii/S095741742100107X},
  doi       = {10.1016/j.eswa.2021.114666},
  issn      = {0957-4174},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Expert Systems with Applications},
  volume    = {172},
  pages     = {114666},
  abstract  = {The article investigates the application of NeuroEvolution of Augmenting Topologies (NEAT) to generate and parameterize artificial neural networks (ANN) on determining allocation and sequencing decisions in a two-stage hybrid flow shop scheduling environment with family setup times. NEAT is a machine-learning and neural architecture search algorithm, which generates both, the structure and the hyper-parameters of an ANN. Our experiments show that NEAT can compete with state-of-the-art approaches in terms of solution quality and outperforms them regarding computational efficiency. The main contributions of this article are: (i) A comparison of five different strategies, evaluated with 14 different experiments, on how ANNs can be applied for solving allocation and sequencing problems in a hybrid flow shop environment, (ii) a comparison of the best identified NEAT strategy with traditional heuristic and metaheuristic approaches concerning solution quality and computational efficiency.},
  pubstate  = {published},
  tppubtype = {article}
}
Luo, Renqian; Tan, Xu; Wang, Rui; Qin, Tao; Li, Jinzhu; Zhao, Sheng; Chen, Enhong; Liu, Tie-Yan
LightSpeech: Lightweight and Fast Text to Speech with Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-04040,
  title      = {{LightSpeech}: Lightweight and Fast Text to Speech with Neural Architecture Search},
  author     = {Renqian Luo and Xu Tan and Rui Wang and Tao Qin and Jinzhu Li and Sheng Zhao and Enhong Chen and Tie-Yan Liu},
  url        = {https://arxiv.org/abs/2102.04040},
  eprint     = {2102.04040},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2102.04040},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Li, Xiaohan; Xie, Ziyan; Lai, Taotao; Zhao, Fusheng; Xu, Haiyin; Chen, Riqing
NAS-WFPN: Neural Architecture Search Weighted Feature Pyramid Networks for Object Detection Proceedings Article
In: Wang, Guojun; Chen, Bing; Li, Wei; Pietro, Roberto Di; Yan, Xuefeng; Han, Hao (Ed.): Security, Privacy, and Anonymity in Computation, Communication, and Storage, pp. 384–394, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-68884-4.
@inproceedings{10.1007/978-3-030-68884-4_32,
  title     = {{NAS-WFPN}: Neural Architecture Search Weighted Feature Pyramid Networks for Object Detection},
  author    = {Xiaohan Li and Ziyan Xie and Taotao Lai and Fusheng Zhao and Haiyin Xu and Riqing Chen},
  editor    = {Guojun Wang and Bing Chen and Wei Li and Roberto Di Pietro and Xuefeng Yan and Hao Han},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-68884-4_32},
  doi       = {10.1007/978-3-030-68884-4_32},
  isbn      = {978-3-030-68884-4},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Security, Privacy, and Anonymity in Computation, Communication, and Storage},
  pages     = {384--394},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fu, Xianya; Li, Wenrui; Chen, Qiurui; Zhang, Lianyi; Yang, Kai; Qing, Duzheng; Wang, Rui
NASIL: Neural Network Architecture Searching for Incremental Learning in Image Classification Proceedings Article
In: Ning, Li; Chau, Vincent; Lau, Francis (Ed.): Parallel Architectures, Algorithms and Programming, pp. 68–80, Springer Singapore, Singapore, 2021, ISBN: 978-981-16-0010-4.
@inproceedings{10.1007/978-981-16-0010-4_7,
  title     = {{NASIL}: Neural Network Architecture Searching for Incremental Learning in Image Classification},
  author    = {Xianya Fu and Wenrui Li and Qiurui Chen and Lianyi Zhang and Kai Yang and Duzheng Qing and Rui Wang},
  editor    = {Li Ning and Vincent Chau and Francis Lau},
  url       = {https://link.springer.com/chapter/10.1007/978-981-16-0010-4_7},
  doi       = {10.1007/978-981-16-0010-4_7},
  isbn      = {978-981-16-0010-4},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Parallel Architectures, Algorithms and Programming},
  pages     = {68--80},
  publisher = {Springer Singapore},
  address   = {Singapore},
  abstract  = {``Catastrophic forgetting'' and scalability of tasks are two major challenges of incremental learning. Both of these issues were related to the insufficient capacity of machine learning model and the insufficiently trained weights as the increasing of tasks. In this paper, we try to figure out the impact of the neural network architecture to the performance of incremental learning in the case of image classification. During the increasing of tasks, we propose to use neural network architecture searching (NAS) to find a structure that fits the new tasks collection better. We build a NAS environment with reinforcement learning as the searching strategy and Long Short-Term Memory network as the controller network. Computation operation and connecting previous nodes are selected for each layer in the search phase. For each time a new group of tasks is added, the neural network architecture is searched and reorganized according to the training data set. To speed up the searching, we design a parameter sharing mechanism, in which the same building blocks in each layer share a group of parameters. We also introduce the quantified-parameter building blocks into the NAS, to identify the best candidate during each round of searching. We test our solution in cifar100 data set, the average accuracy outperforms the current representative solutions (LwEMC, iCaRL, GANIL) by 24.92%, 5.62%, and 3.6%, respectively, the more tasks added, the better our solution performs.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lou, Xiaoxuan; Guo, Shangwei; Zhang, Tianwei; Zhang, Yinqian; Liu, Yang
When NAS Meets Watermarking: Ownership Verification of DNN Models via Cache Side Channels Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-03523,
  title      = {When {NAS} Meets Watermarking: Ownership Verification of {DNN} Models via Cache Side Channels},
  author     = {Xiaoxuan Lou and Shangwei Guo and Tianwei Zhang and Yinqian Zhang and Yang Liu},
  url        = {https://arxiv.org/abs/2102.03523},
  eprint     = {2102.03523},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  journal    = {CoRR},
  volume     = {abs/2102.03523},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Wang, W; Zhu, L
Reliable Network Search Based on Evolutionary Algorithm Proceedings Article
In: 2021 International Conference on Computer, Control and Robotics (ICCCR), pp. 279-282, 2021.
@inproceedings{9349406,
  title     = {Reliable Network Search Based on Evolutionary Algorithm},
  author    = {W. Wang and L. Zhu},
  url       = {https://ieeexplore.ieee.org/abstract/document/9349406},
  doi       = {10.1109/ICCCR49711.2021.9349406},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {2021 International Conference on Computer, Control and Robotics (ICCCR)},
  pages     = {279--282},
  abstract  = {In this paper, we propose a neural architecture compression method based on network search to design a lightweight model for the network compression. The reasonable search method is designed based on evolutionary algorithm and search space for searching the efficient neural architecture, called EANet. The experimental results on several benchmarks datasets show that the performance of the EANet is better and the storage space is smaller. Besides, the light-weight SSD variant detection network based on EANet is applied to a railway intelligent surveillance system, which achieves the practical application based on the CNN model.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Sheng; Tan, Mingxing; Pang, Ruoming; Li, Andrew; Cheng, Liqun; Le, Quoc; Jouppi, Norman P
Searching for Fast Model Families on Datacenter Accelerators Technical Report
2021.
@techreport{li2021searching,
  title      = {Searching for Fast Model Families on Datacenter Accelerators},
  author     = {Sheng Li and Mingxing Tan and Ruoming Pang and Andrew Li and Liqun Cheng and Quoc Le and Norman P. Jouppi},
  url        = {https://arxiv.org/abs/2102.05610},
  eprint     = {2102.05610},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  pubstate   = {published},
  tppubtype  = {techreport}
}
He, Xin; Zhao, Kaiyong; Chu, Xiaowen
AutoML: A Survey of the State-of-the-Art Journal Article
In: Knowledge-Based Systems, vol. 212, pp. 106622, 2021.
@article{he2021automl,
  title     = {{AutoML}: A Survey of the State-of-the-Art},
  author    = {Xin He and Kaiyong Zhao and Xiaowen Chu},
  url       = {https://www.sciencedirect.com/science/article/abs/pii/S0950705120307516},
  doi       = {10.1016/j.knosys.2020.106622},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Knowledge-Based Systems},
  volume    = {212},
  pages     = {106622},
  publisher = {Elsevier},
  abstract  = {Deep learning (DL) techniques have obtained remarkable achievements on various tasks, such as image recognition, object detection, and language modeling. However, building a high-quality DL system for a specific task highly relies on human expertise, hindering its wide application. Meanwhile, automated machine learning (AutoML) is a promising solution for building a DL system without human assistance and is being extensively studied. This paper presents a comprehensive and up-to-date review of the state-of-the-art (SOTA) in AutoML. According to the DL pipeline, we introduce AutoML methods – covering data preparation, feature engineering, hyperparameter optimization, and neural architecture search (NAS) – with a particular focus on NAS, as it is currently a hot sub-topic of AutoML. We summarize the representative NAS algorithms’ performance on the CIFAR-10 and ImageNet datasets and further discuss the following subjects of NAS methods: one/two-stage NAS, one-shot NAS, joint hyperparameter and architecture optimization, and resource-aware NAS. Finally, we discuss some open problems related to the existing AutoML methods for future research.},
  pubstate  = {published},
  tppubtype = {article}
}
Liu, Peidong; Zhang, Gengwei; Wang, Bochao; Xu, Hang; Liang, Xiaodan; Jiang, Yong; Li, Zhenguo
Loss Function Discovery for Object Detection via Convergence-Simulation Driven Search Technical Report
2021.
@techreport{liu2021loss,
  title      = {Loss Function Discovery for Object Detection via Convergence-Simulation Driven Search},
  author     = {Peidong Liu and Gengwei Zhang and Bochao Wang and Hang Xu and Xiaodan Liang and Yong Jiang and Zhenguo Li},
  url        = {https://arxiv.org/abs/2102.04700},
  eprint     = {2102.04700},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Su, Xiu; You, Shan; Huang, Tao; Wang, Fei; Qian, Chen; Zhang, Changshui; Xu, Chang
Locally Free Weight Sharing for Network Width Search Technical Report
2021.
@techreport{su2021locally,
  title      = {Locally Free Weight Sharing for Network Width Search},
  author     = {Xiu Su and Shan You and Tao Huang and Fei Wang and Chen Qian and Changshui Zhang and Chang Xu},
  url        = {https://arxiv.org/abs/2102.05258},
  eprint     = {2102.05258},
  eprinttype = {arXiv},
  year       = {2021},
  date       = {2021-01-01},
  pubstate   = {published},
  tppubtype  = {techreport}
}
Sun, Y; Sun, X; Fang, Y; Yen, G G; Liu, Y
A Novel Training Protocol for Performance Predictors of Evolutionary Neural Architecture Search Algorithms Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2021.
@article{9336721,
title = {A Novel Training Protocol for Performance Predictors of Evolutionary Neural Architecture Search Algorithms},
author = {Y Sun and X Sun and Y Fang and G G Yen and Y Liu},
url = {https://ieeexplore.ieee.org/document/9336721},
doi = {10.1109/TEVC.2021.3055076},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Evolutionary Computation},
pages = {1--1},
abstract = {Evolutionary Neural Architecture Search (ENAS) can automatically design the architectures of Deep Neural Networks (DNNs) using evolutionary computation algorithms. However, most ENAS algorithms require intensive computational resource, which is not necessarily available to the users interested. Performance predictors are a type of regression models which can assist to accomplish the search, while without exerting much computational resource. Despite various performance predictors have been designed, they employ the same training protocol to build the regression models: 1) sampling a set of DNNs with performance as the training dataset, 2) training the model with the mean square error criterion, and 3) predicting the performance of DNNs newly generated during the ENAS. In this paper, we point out that the three steps constituting the training protocol are not well though-out through intuitive and illustrative examples. Furthermore, we propose a new training protocol to address these issues, consisting of designing a pairwise ranking indicator to construct the training target, proposing to use the logistic regression to fit the training samples, and developing a differential method to build the training instances. To verify the effectiveness of the proposed training protocol, four widely used regression models in the field of machine learning have been chosen to perform the comparisons on two benchmark datasets. The experimental results of all the comparisons demonstrate that the proposed training protocol can significantly improve the performance prediction accuracy against the traditional training protocols.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Turner, Jack; Crowley, Elliot J; O'Boyle, Michael F P
Neural Architecture Search as Program Transformation Exploration Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-06599,
title = {Neural Architecture Search as Program Transformation Exploration},
author = {Jack Turner and Elliot J Crowley and Michael F P O'Boyle},
url = {https://arxiv.org/abs/2102.06599},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.06599},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lopes, Vasco; Alirezazadeh, Saeid; Alexandre, Luís A.
EPE-NAS: Efficient Performance Estimation Without Training for Neural Architecture Search Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-08099,
title = {EPE-NAS: Efficient Performance Estimation Without Training for Neural Architecture Search},
author = {Vasco Lopes and Saeid Alirezazadeh and Lu{\'i}s A. Alexandre},
url = {https://arxiv.org/abs/2102.08099},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.08099},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yan, Shen; Song, Kaiqiang; Liu, Fei; Zhang, Mi
CATE: Computation-aware Neural Architecture Encoding with Transformers Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-07108,
title = {CATE: Computation-aware Neural Architecture Encoding with Transformers},
author = {Shen Yan and Kaiqiang Song and Fei Liu and Mi Zhang},
url = {https://arxiv.org/abs/2102.07108},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.07108},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Calisto, Maria Baldeon G; Lai-Yuen, Susana K
EMONAS: efficient multiobjective neural architecture search framework for 3D medical image segmentation Proceedings Article
In: Išgum, Ivana; Landman, Bennett A (Ed.): Medical Imaging 2021: Image Processing, pp. 22 – 34, International Society for Optics and Photonics SPIE, 2021.
@inproceedings{10.1117/12.2577088,
title = {EMONAS: efficient multiobjective neural architecture search framework for 3D medical image segmentation},
author = {Maria Baldeon G Calisto and Susana K Lai-Yuen},
editor = {Ivana Išgum and Bennett A Landman},
url = {https://doi.org/10.1117/12.2577088},
doi = {10.1117/12.2577088},
year = {2021},
date = {2021-01-01},
booktitle = {Medical Imaging 2021: Image Processing},
volume = {11596},
pages = {22 -- 34},
publisher = {SPIE},
organization = {International Society for Optics and Photonics},
abstract = {Deep learning plays a critical role in medical image segmentation. Nevertheless, manually designing a neural network for a specific segmentation problem is a very difficult and time-consuming task due to the massive hyperparameter search space, long training time and large volumetric data. Therefore, most designed networks are highly complex, task specific and over-parametrized. Recently, multiobjective neural architecture search (NAS) methods have been proposed to automate the design of accurate and efficient segmentation architectures. However, they only search for either the macro- or micro-structure of the architecture, do not use the information produced during the optimization process to increase the efficiency of the search, and do not consider the volumetric nature of medical images. In this work, we propose EMONAS, an Efficient MultiObjective Neural Architecture Search framework for 3D medical image segmentation. EMONAS is composed of a search space that considers both the macro- and micro-structure of the architecture, and a surrogate-assisted multiobjective evolutionary based algorithm that efficiently searches for the best hyperparameters using a Random Forest surrogate and guiding selection probabilities. EMONAS is evaluated on the task of cardiac segmentation from the ACDC MICCAI challenge. The architecture found is ranked within the top 10 submissions in all evaluation metrics, performing better or comparable to other approaches while reducing the search time by more than 50% and having considerably fewer number of parameters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Robbiano, Luca; Rahman, Muhammad Rameez Ur; Galasso, Fabio; Caputo, Barbara; Carlucci, Fabio Maria
Adversarial Branch Architecture Search for Unsupervised Domain Adaptation Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-06679,
title = {Adversarial Branch Architecture Search for Unsupervised Domain Adaptation},
author = {Luca Robbiano and Muhammad Rameez Ur Rahman and Fabio Galasso and Barbara Caputo and Fabio Maria Carlucci},
url = {https://arxiv.org/abs/2102.06679},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.06679},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Dilin; Gong, Chengyue; Li, Meng; Liu, Qiang; Chandra, Vikas
AlphaNet: Improved Training of Supernet with Alpha-Divergence Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-07954,
title = {AlphaNet: Improved Training of Supernet with Alpha-Divergence},
author = {Dilin Wang and Chengyue Gong and Meng Li and Qiang Liu and Vikas Chandra},
url = {https://arxiv.org/abs/2102.07954},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.07954},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Roberts, Nicholas Carl; Khodak, Mikhail; Dao, Tri; Li, Liam; Balcan, Nina; Ré, Christopher; Talwalkar, Ameet
Searching for Convolutions and a More Ambitious NAS Miscellaneous
2021.
@misc{roberts2021searching,
title = {Searching for Convolutions and a More Ambitious NAS},
author = {Nicholas Carl Roberts and Mikhail Khodak and Tri Dao and Liam Li and Nina Balcan and Christopher R{\'e} and Ameet Talwalkar},
url = {https://openreview.net/forum?id=ascdLuNQY4J},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ru, Binxin; Wan, Xingchen; Dong, Xiaowen; Osborne, Michael
Interpretable Neural Architecture Search via Bayesian Optimisation with Weisfeiler-Lehman Kernels Proceedings Article
In: International Conference on Learning Representations, 2021.
@inproceedings{ru2021interpretable,
title = {Interpretable Neural Architecture Search via Bayesian Optimisation with Weisfeiler-Lehman Kernels},
author = {Binxin Ru and Xingchen Wan and Xiaowen Dong and Michael Osborne},
url = {https://openreview.net/forum?id=j9Rv7qdXjd},
year = {2021},
date = {2021-01-01},
booktitle = {International Conference on Learning Representations},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhou, Yanqi; Dong, Xuanyi; Akin, Berkin; Tan, Mingxing; Peng, Daiyi; Meng, Tianjian; Yazdanbakhsh, Amir; Huang, Da; Narayanaswami, Ravi; Laudon, James
Rethinking Co-design of Neural Architectures and Hardware Accelerators Technical Report
2021.
@techreport{DBLP:journals/corr/abs-2102-08619,
title = {Rethinking Co-design of Neural Architectures and Hardware Accelerators},
author = {Yanqi Zhou and Xuanyi Dong and Berkin Akin and Mingxing Tan and Daiyi Peng and Tianjian Meng and Amir Yazdanbakhsh and Da Huang and Ravi Narayanaswami and James Laudon},
url = {https://arxiv.org/abs/2102.08619},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.08619},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hu, Liangxiao; Liu, Qinglin; Zhang, Jun; Jiang, Feng; Liu, Yang; Zhang, Shengping
A-DARTS: attention-guided differentiable architecture search for lung nodule classification Journal Article
In: Journal of Electronic Imaging, vol. 30, no. 1, pp. 1 – 11, 2021.
@article{10.1117/1.JEI.30.1.013012,
title = {A-DARTS: attention-guided differentiable architecture search for lung nodule classification},
author = {Liangxiao Hu and Qinglin Liu and Jun Zhang and Feng Jiang and Yang Liu and Shengping Zhang},
url = {https://doi.org/10.1117/1.JEI.30.1.013012},
doi = {10.1117/1.JEI.30.1.013012},
year = {2021},
date = {2021-01-01},
journal = {Journal of Electronic Imaging},
volume = {30},
number = {1},
pages = {1 -- 11},
publisher = {SPIE},
abstract = {Lung cancer has caused the most cancer deaths in the past several years. Benign–malignant lung nodule classification is vital in lung nodule detection, which can help early diagnosis of lung cancer. Most existing works extract the features of chest CT images using the well-designed networks, which require substantial effort of experts. To automate the manual process of network design, we propose an attention-guided differentiable architecture search (A-DARTS) method, which directly searches for the optimal network on chest CT images. In addition, A-DARTS utilizes an attention mechanism to alleviate the effect of the initialization-sensitive nature of the searched network while enhancing the feature presentation ability. Extensive experiments on the Lung Image Database Consortium image collection (LIDC-IDRI) benchmark dataset show that the proposed method achieves a lung nodule classification accuracy of 92.93%, which is superior to the state-of-the-art methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nayman, Niv; Aflalo, Yonathan; Noy, Asaf; Zelnik-Manor, Lihi
HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search Technical Report
2021.
@techreport{nayman2021hardcore,
title = {HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
author = {Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
url = {https://arxiv.org/abs/2102.11646},
year = {2021},
date = {2021-01-01},
journal = {CoRR},
volume = {abs/2102.11646},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Nguyen, Nam; Chang, Morris J
Contrastive Self-supervised Neural Architecture Search Miscellaneous
2021.
@misc{nguyen2021contrastive,
title = {Contrastive Self-supervised Neural Architecture Search},
author = {Nam Nguyen and Morris J Chang},
url = {https://arxiv.org/abs/2102.10557},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Guo, Yong; Zheng, Yin; Tan, Mingkui; Chen, Qi; Li, Zhipeng; Chen, Jian; Zhao, Peilin; Huang, Junzhou
Towards Accurate and Compact Architectures via Neural Architecture Transformer Technical Report
2021.
@techreport{guo2021accurate,
title = {Towards Accurate and Compact Architectures via Neural Architecture Transformer},
author = {Yong Guo and Yin Zheng and Mingkui Tan and Qi Chen and Zhipeng Li and Jian Chen and Peilin Zhao and Junzhou Huang},
url = {https://arxiv.org/abs/2102.10301},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wu, Junru; Dai, Xiyang; Chen, Dongdong; Chen, Yinpeng; Liu, Mengchen; Yu, Ye; Wang, Zhangyang; Liu, Zicheng; Chen, Mei; Yuan, Lu
Weak NAS Predictors Are All You Need Technical Report
2021.
@techreport{wu2021weak,
title = {Weak NAS Predictors Are All You Need},
author = {Junru Wu and Xiyang Dai and Dongdong Chen and Yinpeng Chen and Mengchen Liu and Ye Yu and Zhangyang Wang and Zicheng Liu and Mei Chen and Lu Yuan},
url = {https://arxiv.org/abs/2102.10490},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Zijun; Cofer, Evan M.; Troyanskaya, Olga G.
AMBIENT: Accelerated Convolutional Neural Network Architecture Search for Regulatory Genomics Proceedings Article
In: Machine Learning in Computational Biology (MLCB 2020), 2021.
@inproceedings{ZhangMLCB2020,
title = {AMBIENT: Accelerated Convolutional Neural Network Architecture Search for Regulatory Genomics},
author = {Zijun Zhang and Evan M. Cofer and Olga G. Troyanskaya},
url = {https://www.biorxiv.org/content/biorxiv/early/2021/02/27/2021.02.25.432960.full.pdf},
year = {2021},
date = {2021-01-01},
booktitle = {Machine Learning in Computational Biology (MLCB 2020)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, J; Jiang, Y; Huang, Z; Guo, X; Wu, B; Sun, L; Wu, T
Fine-Grained Detection of Driver Distraction Based on Neural Architecture Search Journal Article
In: IEEE Transactions on Intelligent Transportation Systems, pp. 1-19, 2021.
@article{9352235,
title = {Fine-Grained Detection of Driver Distraction Based on Neural Architecture Search},
author = {J Chen and Y Jiang and Z Huang and X Guo and B Wu and L Sun and T Wu},
url = {https://ieeexplore.ieee.org/abstract/document/9352235},
doi = {10.1109/TITS.2021.3055545},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Intelligent Transportation Systems},
pages = {1--19},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, T; Lei, C; Zhang, Z; Meng, X -B; Chen, C L P
AS-NAS: Adaptive Scalable Neural Architecture Search with Reinforced Evolutionary Algorithm for Deep Learning Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2021.
@article{9360872,
title = {AS-NAS: Adaptive Scalable Neural Architecture Search with Reinforced Evolutionary Algorithm for Deep Learning},
author = {T Zhang and C Lei and Z Zhang and X -B Meng and C L P Chen},
url = {https://ieeexplore.ieee.org/abstract/document/9360872},
doi = {10.1109/TEVC.2021.3061466},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Evolutionary Computation},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Guoyang; Wang, Jinbao; Yu, Guo; Zheng, Feng; Jin, Yaochu
Tiny Adversarial Multi-Objective Oneshot Neural Architecture Search Technical Report
2021.
@techreport{xie2021tiny,
title = {Tiny Adversarial Multi-Objective Oneshot Neural Architecture Search},
author = {Guoyang Xie and Jinbao Wang and Guo Yu and Feng Zheng and Yaochu Jin},
url = {https://arxiv.org/abs/2103.00363},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Le, Cat P; Soltani, Mohammadreza; Ravier, Robert; Tarokh, Vahid
Neural Architecture Search From Task Similarity Measure Technical Report
2021.
@techreport{le2021neural,
title = {Neural Architecture Search From Task Similarity Measure},
author = {Cat P Le and Mohammadreza Soltani and Robert Ravier and Vahid Tarokh},
url = {https://arxiv.org/abs/2103.00241},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Guo, Yong; Chen, Yaofo; Zheng, Yin; Chen, Qi; Zhao, Peilin; Chen, Jian; Huang, Junzhou; Tan, Mingkui
Pareto-Frontier-aware Neural Architecture Generation for Diverse Budgets Technical Report
2021.
@techreport{guo2021paretofrontieraware,
title = {Pareto-Frontier-aware Neural Architecture Generation for Diverse Budgets},
author = {Yong Guo and Yaofo Chen and Yin Zheng and Qi Chen and Peilin Zhao and Jian Chen and Junzhou Huang and Mingkui Tan},
url = {https://arxiv.org/abs/2103.00219},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jeong, Wonyong; Lee, Hayeon; Park, Gun; Hyung, Eunyoung; Baek, Jinheon; Hwang, Sung Ju
Task-Adaptive Neural Network Retrieval with Meta-Contrastive Learning Technical Report
2021.
@techreport{jeong2021taskadaptive,
title = {Task-Adaptive Neural Network Retrieval with Meta-Contrastive Learning},
author = {Wonyong Jeong and Hayeon Lee and Gun Park and Eunyoung Hyung and Jinheon Baek and Sung Ju Hwang},
url = {https://arxiv.org/abs/2103.01495},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Ziwei; Wang, Xin; Zhu, Wenwu
Automated Machine Learning on Graphs: A Survey Technical Report
2021.
@techreport{zhang2021automated,
title = {Automated Machine Learning on Graphs: A Survey},
author = {Ziwei Zhang and Xin Wang and Wenwu Zhu},
url = {https://arxiv.org/abs/2103.00742},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Liu, Luyan; Wen, Zhiwei; Liu, Songwei; Zhou, Hong-Yu; Zhu, Hongwei; Xie, Weicheng; Shen, Linlin; Ma, Kai; Zheng, Yefeng
MixSearch: Searching for Domain Generalized Medical Image Segmentation Architectures Technical Report
2021.
@techreport{liu2021mixsearch,
title = {MixSearch: Searching for Domain Generalized Medical Image Segmentation Architectures},
author = {Luyan Liu and Zhiwei Wen and Songwei Liu and Hong-Yu Zhou and Hongwei Zhu and Weicheng Xie and Linlin Shen and Kai Ma and Yefeng Zheng},
url = {https://arxiv.org/abs/2102.13280},
year = {2021},
date = {2021-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}