@inproceedings{b7e1df7723774ef19fac9af9274c4294,
  title         = {Efficient Universal Goal Hijacking with Semantics-guided Prompt Organization},
  author        = {Huang, Yihao and Wang, Chong and Jia, Xiaojun and Guo, Qing and Juefei-Xu, Felix and Zhang, Jian and Liu, Yang and Pu, Geguang},
  editor        = {Che, Wanxiang and Nabende, Joyce and Shutova, Ekaterina and Pilehvar, Mohammad Taher},
  booktitle     = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  series        = {Proceedings of the Annual Meeting of the Association for Computational Linguistics},
  publisher     = {Association for Computational Linguistics (ACL)},
  address       = {Vienna, Austria},
  year          = {2025},
  pages         = {5796--5816},
  language      = {english},
  abstract      = {Universal goal hijacking is a kind of prompt injection attack that forces LLMs to return a target malicious response for arbitrary normal user prompts. The previous methods achieve high attack performance while being too cumbersome and time-consuming. Also, they have concentrated solely on optimization algorithms, overlooking the crucial role of the prompt. To this end, we propose a method called POUGH that incorporates an efficient optimization algorithm and two semantics-guided prompt organization strategies. Specifically, our method starts with a sampling strategy to select representative prompts from a candidate pool, followed by a ranking strategy that prioritizes them. Given the sequentially ranked prompts, our method employs an iterative optimization algorithm to generate a fixed suffix that can concatenate to arbitrary user prompts for universal goal hijacking. Experiments conducted on four popular LLMs and ten types of target responses verified the effectiveness. Warning: This paper contains model outputs that are offensive in nature.},
  note          = {Publisher Copyright: {\textcopyright} 2025 Association for Computational Linguistics.; 63rd Annual Meeting of the Association for Computational Linguistics, ACL 2025 ; Conference date: 27-07-2025 Through 01-08-2025},
  internal-note = {NOTE(review): address was originally "澳大利亚" (Australia); the conference dates in the note match ACL 2025, which was held in Vienna, Austria -- presumed mistranslation of Austria, verify against the ACL Anthology record},
}