Huang, Yifan; Liu, Yi; Liu, Wenjie; Papadimitratos, Panos; Haas, Harald; Tavakkolnia, Iman
Interference-Resilient Optical Wireless Positioning via Machine Learning-Enhanced Subset Filtering Proceedings Article
In: 2025.
BibTeX | Tags: LRDC, machine learning, Optical wireless communication
@inproceedings{huang_interference-resilient_2025,
  title         = {Interference-Resilient Optical Wireless Positioning via Machine Learning-Enhanced Subset Filtering},
  author        = {Huang, Yifan and Liu, Yi and Liu, Wenjie and Papadimitratos, Panos and Haas, Harald and Tavakkolnia, Iman},
  year          = {2025},
  date          = {2025-09-01},
  keywords      = {LRDC, machine learning, Optical wireless communication},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {NOTE(review): required booktitle (proceedings title) is missing from this entry -- add when known; BibTeX will warn on @inproceedings without it},
}
Feng, Xinyu; El-Hajjar, Mohammed; Xu, Chao; Hanzo, Lajos
Graph Neural Network Aided Detection for the Multi-User Multi-Dimensional Index Modulated Uplink Journal Article
In: IEEE Open Journal of Vehicular Technology, vol. 6, pp. 1593–1612, 2025, ISSN: 2644-1330.
Abstract | Links | BibTeX | Tags: Artificial neural networks, Detectors, graph factor, graph neural network (GNN), Graph neural networks, Index modulation (IM), Indexes, machine learning, Message passing, message passing (MP), multi-user, Next generation networking, Peak to average power ratio, Symbols, Uplink, Vectors
@article{feng_graph_2025,
  title     = {Graph Neural Network Aided Detection for the Multi-User Multi-Dimensional Index Modulated Uplink},
  author    = {Feng, Xinyu and El-Hajjar, Mohammed and Xu, Chao and Hanzo, Lajos},
  url       = {https://ieeexplore.ieee.org/document/11017516},
  doi       = {10.1109/OJVT.2025.3574934},
  issn      = {2644-1330},
  year      = {2025},
  date      = {2025-01-01},
  urldate   = {2025-10-08},
  journal   = {IEEE Open Journal of Vehicular Technology},
  volume    = {6},
  pages     = {1593--1612},
  abstract  = {The concept of Compressed Sensing-aided Space-Frequency Index Modulation (CS-SFIM) is conceived for the Large-Scale Multi-User Multiple-Input Multiple-Output Uplink (LS-MU-MIMO-UL) of Next-Generation (NG) networks. Explicitly, in CS-SFIM, the information bits are mapped to both spatial- and frequency-domain indices, where we treat the activation patterns of the transmit antennas and of the subcarriers separately. Serving a large number of users in an MU-MIMO-UL system leads to substantial Multi-User Interference (MUI). Hence, we design the Space-Frequency (SF) domain matrix as a joint factor graph, where the Approximate Message Passing (AMP) and Expectation Propagation (EP) based MU detectors can be utilized. In the LS-MU-MIMO-UL scenario considered, the proposed system uses optimal Maximum Likelihood (ML) and Minimum Mean Square Error (MMSE) detectors as benchmarks for comparison with the proposed MP-based detectors. These MP-based detectors significantly reduce the detection complexity compared to ML detection, making the design eminently suitable for LS-MU scenarios. To further reduce the detection complexity and improve the detection performance, we propose a pair of Graph Neural Network (GNN) based detectors, which rely on the orthogonal AMP (OAMP) and on the EP algorithm, which we refer to as the GNN-AMP and GEPNet detectors, respectively. The GEPNet detector maximizes the detection performance, while the GNN-AMP detector strikes a performance versus complexity trade-off. The GNN is trained for a single system configuration and yet it can be used for any number of users in the system. The simulation results show that the GNN-based detector approaches the ML performance in various configurations.},
  keywords  = {Artificial neural networks, Detectors, graph factor, graph neural network (GNN), Graph neural networks, Index modulation (IM), Indexes, machine learning, Message passing, message passing (MP), multi-user, Next generation networking, Peak to average power ratio, Symbols, Uplink, Vectors},
  pubstate  = {published},
  tppubtype = {article},
}
Aristodemou, Marios; Liu, Xiaolan; Lambotharan, Sangarapillai; AsSadhan, Basil
Bayesian Optimization-Driven Adversarial Poisoning Attacks Against Distributed Learning Journal Article
In: IEEE Access, vol. 11, pp. 86214–86226, 2023, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: Adversarial machine learning, Adversarial machine learning (AdvML), Data models, Distance learning, Distributed databases, Federated learning, federated learning (FL), Human factors, machine learning, Metaverse, Optimization, poisoning attacks, Servers, split learning (SL), Training
@article{aristodemou_bayesian_2023,
  title     = {{Bayesian} Optimization-Driven Adversarial Poisoning Attacks Against Distributed Learning},
  author    = {Aristodemou, Marios and Liu, Xiaolan and Lambotharan, Sangarapillai and AsSadhan, Basil},
  url       = {https://ieeexplore.ieee.org/document/10214572},
  doi       = {10.1109/ACCESS.2023.3304541},
  issn      = {2169-3536},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2025-10-08},
  journal   = {IEEE Access},
  volume    = {11},
  pages     = {86214--86226},
  abstract  = {Metaverse is envisioned to be the next-generation human-centric Internet which can offer an immersive experience for users with a broad application in healthcare, education, entertainment, and industries. These applications require the analysis of massive data that contains private and sensitive information. A potential solution to preserving privacy is deploying distributed learning frameworks, including federated learning (FL) and split learning (SL), due to their ability to address privacy leakage and analyze personalised data without sharing raw data. However, it is known that FL and SL are still susceptible to adversarial poisoning attacks. In this paper, we analyse such critical issues for the privacy-preserving mechanism in Metaverse services. We develop a novel poisoning attack based on Bayesian optimisation to emulate the adversarial behaviour against FL (BO-FLPA) and SL (BO-SLPA) which is important for the development of effective defense algorithms in the future. Specifically, we develop a layer optimisation method using the intuition of black-box optimisation with assuming that there is a function between the prediction’s uncertainty and layer optimisation parameters. The result of this optimisation provides the optimal weight parameters for the hidden layer, such as the first or the second layer for FL, and the first layer for SL. Numerical results demonstrate that in both FL and SL, the poisoned hidden layers have the ability to increase the susceptibility of the model to adversarial attacks in terms of prediction with low confidence or having a larger deviation of the probability density function of the predictions.},
  keywords  = {Adversarial machine learning, Adversarial machine learning (AdvML), Data models, Distance learning, Distributed databases, Federated learning, federated learning (FL), Human factors, machine learning, Metaverse, Optimization, poisoning attacks, Servers, split learning (SL), Training},
  pubstate  = {published},
  tppubtype = {article},
}