@inproceedings{d41acc5ae55a4ec39d7f09ebf8fc27f1,
  title     = {{CM-PGD}: Adversarial Attacks by Concept-Based Explainable {AI}},
  abstract  = {In recent years, Explainable AI (XAI) has become increasingly critical, and research on XAI has progressed in various aspects, such as Learned Feature, Pixel Attribution, and Concept Detection in image classification. Among them, concept-based explanation is one of the major methods to understand the predictions by a neural network. In this paper, we combine concept-based explanation and adversarial attack together in order to optimize the process of direct adversarial attack. We first propose Concept Mask, which offers a way to explain the decisions of a model. Then, we propose a Concept Masked Projected Gradient Descent (CM-PGD) based attack, which can generate adversarial examples with fewer total pixel change size and fewer changed pixel count. Compared with Projected Gradient Descent (PGD) attack, CM-PGD can reduce 20\%--50\% total pixel change size and 20\%--40\% changed pixel count on average.},
  keywords  = {Adversarial Attack, Deep Learning, Explainable AI},
  author    = {Xu, Shengkai and Zhang, Min and Wang, Jiangtao},
  editor    = {You, Peng and Liu, Shuaiqi and Wang, Jun},
  booktitle = {Proceedings of International Conference on Image, Vision and Intelligent Systems, {ICIVIS} 2023},
  series    = {Lecture Notes in Electrical Engineering},
  pages     = {635--649},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  address   = {Germany},
  year      = {2024},
  doi       = {10.1007/978-981-97-0855-0_61},
  isbn      = {9789819708543},
  language  = {English},
  note      = {Publisher Copyright: {\textcopyright} The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2024.; International Conference on Image, Vision and Intelligent Systems, ICIVIS 2023 ; Conference date: 16-08-2023 Through 18-08-2023},
}