@article{71,
  author = {Karim Lekadir and Alejandro Frangi and Antonio Porras and Ben Glocker and Celia Cintas and Curtis Langlotz and Eva Weicken and Folkert Asselbergs and Fred Prior and Gary Collins and Georgios Kaissis and Gianna Tsakou and Irène Buvat and Jayashree Kalpathy-Cramer and John Mongan and Julia Schnabel and Kaisar Kushibar and Katrine Riklund and Kostas Marias and Lameck Amugongo and Lauren Fromont and Lena Maier-Hein and Leonor Cerdá-Alberich and Luis Martí-Bonmatí and Jorge Cardoso and Maciej Bobowicz and Mahsa Shabani and Manolis Tsiknakis and Maria Zuluaga and Marie-Christine Fritzsche and Marina Camacho and Marius Linguraru and Markus Wenzel and Marleen De Bruijne and Martin Tolsgaard and Melanie Goisauf and Mónica Abadía and Nikolaos Papanikolaou and Noussair Lazrak and Oriol Pujol and Richard Osuala and Sandy Napel and Sara Colantonio and Smriti Joshi and Stefan Klein and Susanna Aussó and Wendy Rogers and Zohaib Salahuddin and Martijn Starmans},
  title = {FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare},
  abstract = {Despite major advances in artificial intelligence (AI) research for healthcare, the deployment and adoption of AI technologies remain limited in clinical practice. This paper describes the FUTURE-AI framework, which provides guidance for the development and deployment of trustworthy AI tools in healthcare. The FUTURE-AI Consortium was founded in 2021 and comprises 117 interdisciplinary experts from 50 countries representing all continents, including AI scientists, clinical researchers, biomedical ethicists, and social scientists. Over a two-year period, the FUTURE-AI guideline was established through consensus based on six guiding principles: fairness, universality, traceability, usability, robustness, and explainability. To operationalise trustworthy AI in healthcare, a set of 30 best practices was defined, addressing technical, clinical, socioethical, and legal dimensions. The recommendations cover the entire lifecycle of healthcare AI, from design, development, and validation to regulation, deployment, and monitoring.},
  year = {2025},
  journal = {BMJ},
  volume = {388},
  pages = {e081554},
  month = {February},
  doi = {10.1136/bmj-2024-081554},
}