@inproceedings{f4f9e2b3c8e549e0bd36d1db65a0531e,
  title     = {Local Texture Complexity Guided Adversarial Attack},
  % Names in unambiguous "Last, First" form; separated by " and " as BibTeX requires.
  author    = {Zhang, Jiefei and Wang, Jie and Lyu, Wanli and Yin, Zhaoxia},
  % Proceedings title; acronyms braced so any recasing style preserves them.
  booktitle = {2023 {IEEE} International Conference on Image Processing, {ICIP} 2023 - Proceedings},
  series    = {Proceedings - International Conference on Image Processing, {ICIP}},
  publisher = {IEEE Computer Society},
  year      = {2023},
  pages     = {2065--2069},
  doi       = {10.1109/ICIP49359.2023.10222176},
  % Was "英语" (Chinese for "English") — garbled export; normalised to a usable value.
  language  = {English},
  % Was "美国" (Chinese for "United States") — translated. NOTE(review): `address`
  % should be the publisher's city; confirm and replace with e.g. {Los Alamitos, CA}.
  address   = {United States},
  keywords  = {Adversarial attack, Adversarial examples, Frequency domain, Texture complexity, Wavelet},
  abstract  = {Extensive research revealed that deep neural networks are vulnerable to adversarial examples. In addition, recent studies have demonstrated that convolutional neural networks tend to recognize the texture (high-frequency components) rather than the shape (low-frequency components) of images. Thus, crafting adversarial perturbation in the frequency domain is proposed to enhance the attack strength. However, these methods either will increase the perceptibility of adversarial examples to the human visual system (HVS) or increase the computational effort in generating adversarial examples. To generate adversarial examples with better imperceptibility while consuming less computational effort, we propose an adversarial attack method to construct adversarial examples in the frequency domain with guidance from the local texture complexity of the image. Experiments on ImageNet and CIFAR-10 show that the proposed method is effective in generating adversarial examples imperceptible to the HVS.},
  note      = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 30th IEEE International Conference on Image Processing, ICIP 2023 ; Conference date: 08-10-2023 Through 11-10-2023},
}