Qi, Jiaju; Lei, Lei; Jonsson, Thorsteinn; Hanzo, Lajos
Electric Bus Charging Schedules Relying on Real Data-Driven Targets Based on Hierarchical Deep Reinforcement Learning Journal Article
In: IEEE Access, vol. 13, pp. 99415–99433, 2025, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: Deep reinforcement learning, Batteries, charging control, Costs, electric bus, Electricity, hierarchical reinforcement learning, Real-time systems, Schedules, Scheduling, Stochastic processes, Uncertainty, Vehicle-to-grid
@article{qi_electric_2025,
  title    = {Electric Bus Charging Schedules Relying on Real Data-Driven Targets Based on Hierarchical Deep Reinforcement Learning},
  author   = {Qi, Jiaju and Lei, Lei and Jonsson, Thorsteinn and Hanzo, Lajos},
  url      = {https://ieeexplore.ieee.org/document/11006647},
  doi      = {10.1109/ACCESS.2025.3571211},
  issn     = {2169-3536},
  year     = {2025},
  date     = {2025-01-01},
  urldate  = {2025-10-08},
  journal  = {IEEE Access},
  volume   = {13},
  pages    = {99415--99433},
  abstract = {The charging scheduling problem of Electric Buses (EBs) is investigated based on Deep Reinforcement Learning (DRL). A Markov Decision Process (MDP) is conceived, where the time horizon includes multiple charging and operating periods in a day, while each period is further divided into multiple time steps. To overcome the challenge of long-range multi-phase planning with sparse reward, we conceive Hierarchical DRL (HDRL) for decoupling the original MDP into a high-level Semi-MDP (SMDP) and multiple low-level MDPs. The Hierarchical Double Deep Q-Network (HDDQN)-Hindsight Experience Replay (HER) algorithm is proposed for simultaneously solving the decision problems arising at different temporal resolutions. As a result, the high-level agent learns an effective policy for prescribing the charging targets for every charging period, while the low-level agent learns an optimal policy for setting the charging power of every time step within a single charging period, with the aim of minimizing the charging costs while meeting the charging target. It is proved that the flat policy constructed by superimposing the optimal high-level policy and the optimal low-level policy performs as well as the optimal policy of the original MDP. Since jointly learning both levels of policies is challenging due to the non-stationarity of the high-level agent and the sampling inefficiency of the low-level agent, we divide the joint learning process into two phases and exploit our new HER algorithm to manipulate the experience replay buffers for both levels of agents. Numerical experiments are performed with the aid of real-world data to evaluate the performance of the proposed algorithm.},
  keywords = {Deep reinforcement learning, Batteries, charging control, Costs, electric bus, Electricity, hierarchical reinforcement learning, Real-time systems, Schedules, Scheduling, Stochastic processes, Uncertainty, Vehicle-to-grid},
  pubstate = {published},
  tppubtype = {article}
}
Chen, Tianrui; Zhang, Xinruo; You, Minglei; Zheng, Gan; Lambotharan, Sangarapillai
Federated Learning Enabled Link Scheduling in D2D Wireless Networks Journal Article
In: IEEE Wireless Communications Letters, vol. 13, no. 1, pp. 89–92, 2024, ISSN: 2162-2345.
Abstract | Links | BibTeX | Tags: Computational modeling, device-to-device (D2D), Device-to-device communication, Federated learning, link scheduling, Scheduling, Servers, Training, Wireless networks
@article{chen_federated_2024,
  title    = {Federated Learning Enabled Link Scheduling in {D2D} Wireless Networks},
  author   = {Chen, Tianrui and Zhang, Xinruo and You, Minglei and Zheng, Gan and Lambotharan, Sangarapillai},
  url      = {https://ieeexplore.ieee.org/document/10268986},
  doi      = {10.1109/LWC.2023.3321500},
  issn     = {2162-2345},
  year     = {2024},
  date     = {2024-01-01},
  urldate  = {2025-10-08},
  journal  = {IEEE Wireless Communications Letters},
  volume   = {13},
  number   = {1},
  pages    = {89--92},
  abstract = {Centralized machine learning methods for device-to-device (D2D) link scheduling may lead to a computing burden for a central server, transmission latency for decisions, and privacy issues for D2D communications. To mitigate these challenges, a federated learning (FL) based method is proposed to solve the link scheduling problem, where a global model is distributedly trained at local devices, and a server is used for aggregating model parameters instead of training samples. Specially, a more realistic scenario with limited channel state information (CSI) is considered instead of full CSI. Despite a decentralized implementation, simulation results demonstrate that the proposed FL based approach with limited CSI performs close to the conventional optimization algorithm. In addition, the FL based solution achieves almost the same performance as that of the centralized training.},
  keywords = {Computational modeling, device-to-device (D2D), Device-to-device communication, Federated learning, link scheduling, Scheduling, Servers, Training, Wireless networks},
  pubstate = {published},
  tppubtype = {article}
}