@inproceedings{06e21b4344774b24b5c0884fef1979d6,
  title     = {{MuseCNN}: Embedding-Guided Polyphonic Music Accompaniment Generation},
  abstract  = {Although various methods are proposed to generate music accompaniment tracks according to the main music track, they suffer from the problems of modeling music dependencies and representing music data, therefore, generating high-quality music accompaniment remains a challenging task. To address this issue, we propose a multi-track sequential convolutional neural network (MuseCNN) to generate accompaniment tracks corresponding to the main music track. Inspired by the similarity between pianoroll representation and pictures, we transform the pianoroll data into a two-channel music representation matrix and feed it into the convolutional neural network (CNN). Using a hierarchical loss function, we integrate all music tracks to maintain the coherence and harmony of the music. Based on the three-level loss design and multiple CNNs, the problem of modeling music dependencies can be solved. The experimental results indicate that our multi-CNN model can effectively learn complex music dependencies and generate harmonious long-sequence polyphonic music.},
  keywords  = {Embedding Learning, Machine Learning, Music Accompaniment Generation},
  author    = {Wang, Yuyang and Ye, Yutong and Zhou, Yingbo and Wen, Qi and Lian, Xiang and Wei, Xian and Chen, Mingsong},
  note      = {Publisher Copyright: {\textcopyright} 2025 Knowledge Systems Institute Graduate School. All rights reserved.; 37th International Conference on Software Engineering and Knowledge Engineering, SEKE 2025 ; Conference date: 29-09-2025 Through 04-10-2025},
  year      = {2025},
  doi       = {10.18293/SEKE2025-069},
  language  = {English},
  series    = {Proceedings of the International Conference on Software Engineering and Knowledge Engineering, SEKE},
  publisher = {Knowledge Systems Institute Graduate School},
  pages     = {271--276},
  booktitle = {Proceedings - SEKE 2025},
  address   = {USA},
  internal-note = {review: language/address were in Chinese (英语/美国); translated to English. Publisher city not given in source -- confirm exact location if needed.},
}