Publications
Song, R.; Zhou, L.; Lyu, L.; Festag, A.; Knoll, A.
ResFed: Communication Efficient Federated Learning by Transmitting Deep Compressed Residuals Journal Article
In: IEEE Internet of Things Journal, 2023.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Song:IEEE-IOT:2023_ResFed,
  title     = {{ResFed}: Communication Efficient Federated Learning by Transmitting Deep Compressed Residuals},
  author    = {R. Song and L. Zhou and L. Lyu and A. Festag and A. Knoll},
  url       = {https://ieeexplore.ieee.org/document/10283999},
  doi       = {10.1109/JIOT.2023.3324079},
  year      = {2023},
  date      = {2023-10-12},
  urldate   = {2023-10-12},
  journal   = {IEEE Internet of Things Journal},
  abstract  = {Federated learning allows for cooperative training among distributed clients by sharing their locally learned model parameters, such as weights or gradients. However, as model size increases, the communication bandwidth required for deployment in wireless networks becomes a bottleneck. To address this, we propose a residual-based federated learning framework (ResFed) that transmits residuals instead of gradients or weights in networks. By predicting model updates at both clients and the server, residuals are calculated as the difference between updated and predicted models and contain more dense information than weights or gradients. We find that the residuals are less sensitive to an increasing compression ratio than other parameters, and hence use lossy compression techniques on residuals to improve communication efficiency for training in federated settings. With the same compression ratio, ResFed outperforms current methods (weight- or gradient-based federated learning) by over 1.4x on federated datasets, including MNIST, FashionMNIST, SVHN, CIFAR-10, CIFAR-100, FEMNIST, in client-to-server communication, and can also be applied to reduce communication costs for server-to-client communication.},
  internal-note = {NOTE(review): journal article without volume/number/pages -- presumably early access at entry time; confirm and complete},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Zhou, L.; Song, R.; Chen, G.; Festag, A.; Knoll, A.
Residual Encoding Framework to Compress DNN Parameters for Fast Transfer Journal Article
In: Knowledge-Based Systems, vol. 277, pp. 110815, 2023, ISSN: 0950-7051.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Zhou:KnowledgeBasedSystems:2023_ResidualEncoding,
  title     = {Residual Encoding Framework to Compress {DNN} Parameters for Fast Transfer},
  author    = {L. Zhou and R. Song and G. Chen and A. Festag and A. Knoll},
  url       = {https://www.sciencedirect.com/science/article/pii/S0950705123005658},
  doi       = {10.1016/j.knosys.2023.110815},
  issn      = {0950-7051},
  year      = {2023},
  date      = {2023-10-09},
  urldate   = {2023-10-09},
  journal   = {Knowledge-Based Systems},
  volume    = {277},
  pages     = {110815},
  abstract  = {Efficient communication is significant for federated learning and DNN model deployment. However, transferring hundreds of millions of DNN parameters over networks with limited bandwidth results in long communication delays or even data losses. To alleviate or even remove the communication bottleneck, efficient methods for parameter compression can be applied. Inspired by video encoding, which exploits inter-frame similarity for compression, we investigate the strong temporal correlations of parameter updates in two near epochs of the DNN model and introduce a model parameter residual encoding framework. By transmitting encoded residual between model parameters in two near epochs, the receiver can reconstruct new model parameters and finish the updates with less communication cost. Furthermore, with respect to our framework, we develop lossless and lossy model parameter compression methods and demonstrate them on popular classification and detection networks. The results show that the lossless method can compress the data size of the parameters to less than 90%, and the lossy method can shrink the parameter size to less than 50% with a fair low loss. Our source code is released at https://github.com/zhouliguo/DNN_param_encode.},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Song, R.; Lyu, L.; Jiang, W.; Festag, A.; Knoll, A.
V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection Journal Article
In: arXiv, 2023, (arXiv preprint arXiv:2305.11654).
Abstract | Links | BibTeX | Tags: client selection, federated learning, Intelligent Transport Systems
@article{Song:ARXIV:2023_V2XBoosted,
  title      = {{V2X-Boosted} Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection},
  author     = {R. Song and L. Lyu and W. Jiang and A. Festag and A. Knoll},
  url        = {https://arxiv.org/pdf/2305.11654},
  eprint     = {2305.11654},
  eprinttype = {arXiv},
  year       = {2023},
  date       = {2023-06-21},
  urldate    = {2023-06-21},
  journal    = {arXiv},
  abstract   = {Machine learning (ML) has revolutionized transportation systems, enabling autonomous driving and smart traffic services. Federated learning (FL) overcomes privacy constraints by training ML models in distributed systems, exchanging model parameters instead of raw data. However, the dynamic states of connected vehicles affect the network connection quality and influence the FL performance. To tackle this challenge, we propose a contextual client selection pipeline that uses Vehicle-to-Everything (V2X) messages to select clients based on the predicted communication latency. The pipeline includes: (i) fusing V2X messages, (ii) predicting future traffic topology, (iii) pre-clustering clients based on local data distribution similarity, and (iv) selecting clients with minimal latency for future model aggregation. Experiments show that our pipeline outperforms baselines on various datasets, particularly in non-iid settings.},
  note       = {arXiv preprint arXiv:2305.11654},
  keywords   = {client selection, federated learning, Intelligent Transport Systems},
  pubstate   = {published},
  tppubtype  = {article}
}
Song, R.; Liu, D.; Chen, D. Z.; Festag, A.; Trinitis, C.; Schulz, M.; Knoll, A.
Federated Learning via Decentralized Dataset Distillation in Resource-Constrained Edge Environments Proceedings Article
In: International Joint Conference on Neural Networks (IJCNN 2023), Queensland, Australia, 2023.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@inproceedings{Song:IJCNN:2023,
  title     = {Federated Learning via Decentralized Dataset Distillation in Resource-Constrained Edge Environments},
  author    = {R. Song and D. Liu and D. Z. Chen and A. Festag and C. Trinitis and M. Schulz and A. Knoll},
  url       = {https://2023.ijcnn.org},
  doi       = {10.1109/IJCNN54540.2023.10191879},
  year      = {2023},
  date      = {2023-06-20},
  urldate   = {2023-06-20},
  booktitle = {International Joint Conference on Neural Networks (IJCNN 2023)},
  address   = {Queensland, Australia},
  abstract  = {In federated learning, all networked clients contribute to the model training cooperatively. However, with model sizes increasing, even sharing the trained partial models often leads to severe communication bottlenecks in underlying networks, especially when communicated iteratively. In this paper, we introduce a federated learning framework FedD3 requiring only one-shot communication by integrating dataset distillation instances. Instead of sharing model updates in other federated learning approaches, FedD3 allows the connected clients to distill the local datasets independently, and then aggregates those decentralized distilled datasets (e.g. a few unrecognizable images) from networks for model training. Our experimental results show that FedD3 significantly outperforms other federated learning frameworks in terms of needed communication volumes, while it provides the additional benefit to be able to balance the trade-off between accuracy and communication cost, depending on usage scenario or target dataset. For instance, for training an AlexNet model on CIFAR-10 with 10 clients under non-independent and identically distributed (Non-IID) setting, FedD3 can either increase the accuracy by over 71% with a similar communication volume, or save 98% of communication volume, while reaching the same accuracy, compared to other one-shot federated learning approaches.},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Song, R.; Lyu, L.; Jiang, W.; Festag, A.; Knoll, A.
V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection Proceedings Article
In: International Joint Conference on Neural Networks (IJCNN 2023) Workshop on Collaborative Perception and Learning, London, UK, 2023.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@inproceedings{Song:ICRA:2023,
  title     = {{V2X-Boosted} Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection},
  author    = {R. Song and L. Lyu and W. Jiang and A. Festag and A. Knoll},
  url       = {https://www.icra2023.org/programme/workshops-tutorials},
  doi       = {10.48550/arXiv.2305.11654},
  year      = {2023},
  date      = {2023-05-30},
  urldate   = {2023-05-30},
  booktitle = {International Joint Conference on Neural Networks (IJCNN 2023) Workshop on Collaborative Perception and Learning},
  address   = {London, UK},
  abstract  = {Machine learning (ML) has revolutionized transportation systems, enabling autonomous driving and smart traffic services. Federated learning (FL) overcomes privacy constraints by training ML models in distributed systems, exchanging model parameters instead of raw data. However, the dynamic states of connected vehicles affect the network connection quality and influence the FL performance. To tackle this challenge, we propose a contextual client selection pipeline that uses Vehicle-to-Everything (V2X) messages to select clients based on the predicted communication latency. The pipeline includes: (i) fusing V2X messages, (ii) predicting future traffic topology, (iii) pre-clustering clients based on local data distribution similarity, and (iv) selecting clients with minimal latency for future model aggregation. Experiments show that our pipeline outperforms baselines on various datasets, particularly in non-iid settings.},
  internal-note = {NOTE(review): abstract previously duplicated the FedD3/IJCNN entry; replaced with this paper's abstract. Key and URL reference ICRA 2023 (London) while booktitle says IJCNN workshop -- verify the actual venue},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Song, R.; Xu, R.; Festag, A.; Ma, J.; Knoll, A.
FedBEVT: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems Journal Article
In: IEEE Transactions on Intelligent Vehicles, vol. 9, no. 1, pp. 958-969, 2023.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Song:TIV:2023-FedBEVT,
  title     = {{FedBEVT}: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems},
  author    = {R. Song and R. Xu and A. Festag and J. Ma and A. Knoll},
  url       = {https://ieeexplore.ieee.org/document/10236488},
  doi       = {10.1109/TIV.2023.3310674},
  year      = {2023},
  date      = {2023-04-08},
  urldate   = {2023-04-04},
  journal   = {IEEE Transactions on Intelligent Vehicles},
  volume    = {9},
  number    = {1},
  pages     = {958--969},
  abstract  = {Bird's eye view (BEV) perception is becoming increasingly important in the field of autonomous driving. It uses multi-view camera data to learn a transformer model that directly projects the perception of the road environment onto the BEV perspective. However, training a transformer model often requires a large amount of data, and as camera data for road traffic are often private, they are typically not shared. Federated learning offers a solution that enables clients to collaborate and train models without exchanging data but model parameters. In this paper, we introduce FedBEVT, a federated transformer learning approach for BEV perception. In order to address two common data heterogeneity issues in FedBEVT: (i) diverse sensor poses, and (ii) varying sensor numbers in perception systems, we propose two approaches - Federated Learning with Camera-Attentive Personalization (FedCaP) and Adaptive Multi-Camera Masking (AMCM), respectively. To evaluate our method in real-world settings, we create a dataset consisting of four typical federated use cases. Our findings suggest that FedBEVT outperforms the baseline approaches in all four use cases, demonstrating the potential of our approach for improving BEV perception in autonomous driving.},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Song, R.; Xu, R.; Festag, A.; Ma, J.; Knoll, A.
FedBEVT: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems Journal Article
In: IEEE Transactions on Intelligent Vehicles, vol. 9, no. 1, pp. 958-969, 2023.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Song:TIV:2024-FedBEVT,
  title     = {{FedBEVT}: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems},
  author    = {R. Song and R. Xu and A. Festag and J. Ma and A. Knoll},
  url       = {https://ieeexplore.ieee.org/document/10236488},
  doi       = {10.1109/TIV.2023.3310674},
  year      = {2023},
  date      = {2023-04-08},
  urldate   = {2023-04-04},
  journal   = {IEEE Transactions on Intelligent Vehicles},
  volume    = {9},
  number    = {1},
  pages     = {958--969},
  abstract  = {Bird's eye view (BEV) perception is becoming increasingly important in the field of autonomous driving. It uses multi-view camera data to learn a transformer model that directly projects the perception of the road environment onto the BEV perspective. However, training a transformer model often requires a large amount of data, and as camera data for road traffic are often private, they are typically not shared. Federated learning offers a solution that enables clients to collaborate and train models without exchanging data but model parameters. In this paper, we introduce FedBEVT, a federated transformer learning approach for BEV perception. In order to address two common data heterogeneity issues in FedBEVT: (i) diverse sensor poses, and (ii) varying sensor numbers in perception systems, we propose two approaches - Federated Learning with Camera-Attentive Personalization (FedCaP) and Adaptive Multi-Camera Masking (AMCM), respectively. To evaluate our method in real-world settings, we create a dataset consisting of four typical federated use cases. Our findings suggest that FedBEVT outperforms the baseline approaches in all four use cases, demonstrating the potential of our approach for improving BEV perception in autonomous driving.},
  internal-note = {NOTE(review): field-for-field duplicate of Song:TIV:2023-FedBEVT under a second key; consolidate to one key (or alias via biblatex ids) once no document cites this one},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Song, R.; Xu, R.; Festag, A.; Ma, J.; Knoll, A.
FedBEVT: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems Journal Article
In: arXiv, 2023, (arXiv preprint arXiv:2304.01534).
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Song:ARXIV:2023-FedBEVT,
  title      = {{FedBEVT}: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems},
  author     = {R. Song and R. Xu and A. Festag and J. Ma and A. Knoll},
  url        = {https://arxiv.org/pdf/2304.01534},
  eprint     = {2304.01534},
  eprinttype = {arXiv},
  year       = {2023},
  date       = {2023-04-08},
  urldate    = {2023-04-04},
  journal    = {arXiv},
  abstract   = {Bird's eye view (BEV) perception is becoming increasingly important in the field of autonomous driving. It uses multi-view camera data to learn a transformer model that directly projects the perception of the road environment onto the BEV perspective. However, training a transformer model often requires a large amount of data, and as camera data for road traffic are often private, they are typically not shared. Federated learning offers a solution that enables clients to collaborate and train models without exchanging data but model parameters. In this paper, we introduce FedBEVT, a federated transformer learning approach for BEV perception. In order to address two common data heterogeneity issues in FedBEVT: (i) diverse sensor poses, and (ii) varying sensor numbers in perception systems, we propose two approaches - Federated Learning with Camera-Attentive Personalization (FedCaP) and Adaptive Multi-Camera Masking (AMCM), respectively. To evaluate our method in real-world settings, we create a dataset consisting of four typical federated use cases. Our findings suggest that FedBEVT outperforms the baseline approaches in all four use cases, demonstrating the potential of our approach for improving BEV perception in autonomous driving.},
  note       = {arXiv preprint arXiv:2304.01534},
  keywords   = {federated learning, Intelligent Transport Systems},
  pubstate   = {published},
  tppubtype  = {article}
}
Song, R.; Zhou, L.; Lyu, L.; Festag, A.; Knoll, A.
ResFed: Communication Efficient Federated Learning by Transmitting Deep Compressed Residuals Journal Article
In: arXiv, 2022, (arXiv:2212.05602 [cs.LG]).
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@article{Song:ARXIV:2022,
  title       = {{ResFed}: Communication Efficient Federated Learning by Transmitting Deep Compressed Residuals},
  author      = {R. Song and L. Zhou and L. Lyu and A. Festag and A. Knoll},
  url         = {https://arxiv.org/abs/2212.05602},
  eprint      = {2212.05602},
  eprinttype  = {arXiv},
  eprintclass = {cs.LG},
  year        = {2022},
  date        = {2022-12-22},
  urldate     = {2022-12-11},
  journal     = {arXiv},
  abstract    = {Federated learning enables cooperative training among massively distributed clients by sharing their learned local model parameters. However, with increasing model size, deploying federated learning requires a large communication bandwidth, which limits its deployment in wireless networks. To address this bottleneck, we introduce a residual-based federated learning framework (ResFed), where residuals rather than model parameters are transmitted in communication networks for training. In particular, we integrate two pairs of shared predictors for the model prediction in both server-to-client and client-to-server communication. By employing a common prediction rule, both locally and globally updated models are always fully recoverable in clients and the server. We highlight that the residuals only indicate the quasi-update of a model in a single inter-round, and hence contain more dense information and have a lower entropy than the model, comparing to model weights and gradients. Based on this property, we further conduct lossy compression of the residuals by sparsification and quantization and encode them for efficient communication. The experimental evaluation shows that our ResFed needs remarkably less communication costs and achieves better accuracy by leveraging less sensitive residuals, compared to standard federated learning. For instance, to train a 4.08 MB CNN model on CIFAR-10 with 10 clients under non-independent and identically distributed (Non-IID) setting, our approach achieves a compression ratio over 700X in each communication round with minimum impact on the accuracy. To reach an accuracy of 70%, it saves around 99% of the total communication volume from 587.61 Mb to 6.79 Mb in up-streaming and to 4.61 Mb in down-streaming on average for all clients.},
  note        = {arXiv:2212.05602 [cs.LG]},
  keywords    = {federated learning, Intelligent Transport Systems},
  pubstate    = {published},
  tppubtype   = {article}
}
Song, R.; Zhou, L.; Lakshminarasimhan, V.; Festag, A.; Knoll, A.
Federated Learning Framework Coping with Hierarchical Heterogeneity in Cooperative ITS Proceedings Article
In: IEEE International Intelligent Transportation Systems Conference (ITSC), pp. 3502-3508, Macau, China, 2022.
Abstract | Links | BibTeX | Tags: federated learning, Intelligent Transport Systems
@inproceedings{Song:ITSC:2022,
  title     = {Federated Learning Framework Coping with Hierarchical Heterogeneity in Cooperative {ITS}},
  author    = {R. Song and L. Zhou and V. Lakshminarasimhan and A. Festag and A. Knoll},
  doi       = {10.1109/ITSC55140.2022.9922064},
  year      = {2022},
  date      = {2022-06-22},
  urldate   = {2022-06-22},
  booktitle = {IEEE International Intelligent Transportation Systems Conference (ITSC)},
  pages     = {3502--3508},
  address   = {Macau, China},
  abstract  = {In this paper, we introduce a federated learning framework coping with Hierarchical Heterogeneity (H2-Fed), which can notably enhance the conventional pre-trained deep learning model. The framework exploits data from connected public traffic agents in vehicular networks without affecting user data privacy. By coordinating existing traffic infrastructure, including roadside units and road traffic clouds, the model parameters are efficiently disseminated by vehicular communications and hierarchically aggregated. Considering the individual heterogeneity of data distribution, computational and communication capabilities across traffic agents and roadside units, we employ a novel method that addresses the heterogeneity of different aggregation layers of the framework architecture, i.e., aggregation in layers of roadside units and cloud. The experiment results indicate that our method can well balance the learning accuracy and stability according to the knowledge of heterogeneity in current communication networks. Compared to other baseline approaches, the evaluation on a Non-IID MNIST dataset shows that our framework is more general and capable especially in application scenarios with low communication quality. Even when 90% of the agents are timely disconnected, the pre-trained deep learning model can still be forced to converge stably, and its accuracy can be enhanced from 68% to over 90% after convergence.},
  keywords  = {federated learning, Intelligent Transport Systems},
  pubstate  = {published},
  tppubtype = {inproceedings}
}