Qi, Jiaju; Lei, Lei; Jonsson, Thorsteinn; Hanzo, Lajos
Electric Bus Charging Schedules Relying on Real Data-Driven Targets Based on Hierarchical Deep Reinforcement Learning Journal Article
In: IEEE Access, vol. 13, pp. 99415–99433, 2025, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: Batteries, charging control, Costs, deep reinforcement learning, electric bus, Electricity, hierarchical reinforcement learning, Real-time systems, Schedules, Scheduling, Stochastic processes, Uncertainty, Vehicle-to-grid
@article{qi_electric_2025,
  title     = {Electric Bus Charging Schedules Relying on Real Data-Driven Targets Based on Hierarchical Deep Reinforcement Learning},
  author    = {Jiaju Qi and Lei Lei and Thorsteinn Jonsson and Lajos Hanzo},
  url       = {https://ieeexplore.ieee.org/document/11006647},
  doi       = {10.1109/ACCESS.2025.3571211},
  issn      = {2169-3536},
  year      = {2025},
  urldate   = {2025-10-08},
  journal   = {IEEE Access},
  volume    = {13},
  pages     = {99415--99433},
  abstract  = {The charging scheduling problem of Electric Buses (EBs) is investigated based on Deep Reinforcement Learning (DRL). A Markov Decision Process (MDP) is conceived, where the time horizon includes multiple charging and operating periods in a day, while each period is further divided into multiple time steps. To overcome the challenge of long-range multi-phase planning with sparse reward, we conceive Hierarchical DRL (HDRL) for decoupling the original MDP into a high-level Semi-MDP (SMDP) and multiple low-level MDPs. The Hierarchical Double Deep Q-Network (HDDQN)-Hindsight Experience Replay (HER) algorithm is proposed for simultaneously solving the decision problems arising at different temporal resolutions. As a result, the high-level agent learns an effective policy for prescribing the charging targets for every charging period, while the low-level agent learns an optimal policy for setting the charging power of every time step within a single charging period, with the aim of minimizing the charging costs while meeting the charging target. It is proved that the flat policy constructed by superimposing the optimal high-level policy and the optimal low-level policy performs as well as the optimal policy of the original MDP. Since jointly learning both levels of policies is challenging due to the non-stationarity of the high-level agent and the sampling inefficiency of the low-level agent, we divide the joint learning process into two phases and exploit our new HER algorithm to manipulate the experience replay buffers for both levels of agents. Numerical experiments are performed with the aid of real-world data to evaluate the performance of the proposed algorithm.},
  keywords  = {Batteries, charging control, Costs, deep reinforcement learning, electric bus, Electricity, hierarchical reinforcement learning, Real-time systems, Schedules, Scheduling, Stochastic processes, Uncertainty, Vehicle-to-grid},
  pubstate  = {published},
  tppubtype = {article},
}
Yan, Hua; Chen, Yunfei
Optimum Distance for In-Flight UAV-to-UAV Wireless Charging Journal Article
In: IEEE Access, vol. 13, pp. 143914–143924, 2025, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: Aperture antennas, Autonomous aerial vehicles, Batteries, Energy Efficiency, Energy loss, far-field, Inductive charging, near-field, Receiving antennas, RF signals, Simultaneous wireless information and power transfer, Transmitting antennas, UAV communications, Wireless communication, Wireless communications, wireless power transfer (WPT)
@article{yan_optimum_2025,
  title     = {Optimum Distance for In-Flight {UAV}-to-{UAV} Wireless Charging},
  author    = {Hua Yan and Yunfei Chen},
  url       = {https://ieeexplore.ieee.org/document/11123803/},
  doi       = {10.1109/ACCESS.2025.3598733},
  issn      = {2169-3536},
  year      = {2025},
  urldate   = {2025-10-08},
  journal   = {IEEE Access},
  volume    = {13},
  pages     = {143914--143924},
  abstract  = {Wireless charging is a promising technology for communications using battery-powered unmanned aerial vehicles (UAVs). In this paper, the optimal distance for UAV-to-UAV in-flight wireless charging and communications is studied. Considering the practical applications, two schemes are proposed. In the first scheme, the discharging UAV (D-UAV) and the charged UAV (C-UAV) are aligned during charging, which requires the D-UAV and the C-UAV to remain relatively stationary. In the second scheme, the D-UAV and the C-UAV move during charging. For both schemes, we aim to maximize the received energy at the C-UAV under the condition that the minimum achievable rate for communications is met. Numerical results show that the optimal distance exists in the Fresnel zone. They also show that the optimal distance increases with the charging frequency. This work provides useful guidance for UAV in-flight wireless charging and communications system designs.},
  keywords  = {Aperture antennas, Autonomous aerial vehicles, Batteries, Energy Efficiency, Energy loss, far-field, Inductive charging, near-field, Receiving antennas, RF signals, Simultaneous wireless information and power transfer, Transmitting antennas, UAV communications, Wireless communication, Wireless communications, wireless power transfer (WPT)},
  pubstate  = {published},
  tppubtype = {article},
}