@article{5f5f2b3f95f74ce28658efdf805004a2,
  author    = {Chen, Jiaxin and Wu, Zekai and Yang, Zhenguo and Xie, Haoran and Wang, Fu Lee and Liu, Wenyin},
  title     = {Multimodal Fusion Network with Contrary Latent Topic Memory for Rumor Detection},
  journal   = {IEEE MultiMedia},
  volume    = {29},
  number    = {1},
  pages     = {104--113},
  year      = {2022},
  month     = feb,
  doi       = {10.1109/MMUL.2022.3146568},
  issn      = {1070-986X},
  publisher = {IEEE Computer Society},
  language  = {English},
  keywords  = {Data mining, Explosions, Feature extraction, Fuses, Semantics, Social networking (online), Visualization},
  abstract  = {Rumors can mislead readers and even have a negative impact on public events, especially multimodal rumors with text and images, which are easier to attract readers' attention. Most existing methods focus on capturing specific characteristics of rumor events and have difficulty in identifying unknown rumor events. In this paper, we propose a multimodal rumor detection network (termed as MRDN) for social rumor detection. MRDN combines the complementary information of text and images through the mechanism of multi-head self-attention fusion (MSF), which allocates weight to different modalities to carry out feature fusion from multiple perspectives. Furthermore, MRDN exploits contrary latent topic memory network (CLTM) to store semantic information about true and false patterns of rumors, which is useful for identifying upcoming new rumors. Extensive experiments conducted on three public datasets show that our multimodal rumor detection method outperforms the state-of-the-art approaches.},
  note      = {Publisher Copyright: {\textcopyright} 2022 IEEE.},
}