@inproceedings{85b3ee2182f546ad8d46af785752e6ca,
  title     = "A Dropout-resilient Verifiable Privacy-Preserving Federated Learning",
  abstract  = "Federated learning enables multiple parties to jointly train a global model without sharing the original data, which has attracted much attention. Existing research work shows that even sharing local gradients will leak local data. What's worse, the server may deliberately tamper with the aggregation results, resulting in user privacy leakage or other attacks, so users need to verify the correctness of the calculation results returned by the server. In this paper, we design a verifiable privacy-preserving scheme where the server is honest and curious but has the additional ability to forge the aggregated results. The proposed scheme can guarantee the privacy gradient of honest users under the condition that no more than t users collude with the server. During the execution of the protocol, the user is allowed to drop out at any phase, and the aggregated results is kept secret from the server. In addition, each user can verify the correctness of the server's calculation results, which is the ciphertext of the aggregated results.",
  keywords  = "Dropout, Federated learning, Privacy-preserving, Verifiable",
  author    = "Qian, Hao and Dong, Xiaolei and Shen, Jiachen and Cao, Zhenfu",
  note      = "Publisher Copyright: {\textcopyright} 2023 SPIE.; 3rd International Conference on Artificial Intelligence and Computer Engineering, ICAICE 2022 ; Conference date: 11-11-2022 Through 13-11-2022",
  year      = "2023",
  doi       = "10.1117/12.2671145",
  language  = "English",
  series    = "Proceedings of SPIE - The International Society for Optical Engineering",
  publisher = "SPIE",
  editor    = "Li, Xiaoli",
  booktitle = "Third International Conference on Artificial Intelligence and Computer Engineering, ICAICE 2022",
  address   = "United States",
}