@article{PetrilloFabbriKagklietal.2021,
  author   = {Petrillo, Mauro and Fabbri, M. and Kagkli, Dafni Maria and Querci, Maddalena and Van den Eede, Guy and Alm, Erik and Aytan-Aktug, Derya and Capella-Gutierrez, Salvador and Carrillo, Catherine and Cestaro, Alessandro and Chan, Kok-Gan and Coque, Teresa and Endrullat, Christoph and Gut, Ivo and Hammer, Paul and Kay, Gemma L. and Madec, Jean-Yves and Mather, Alison E. and McHardy, Alice Carolyn and Naas, Thierry and Paracchini, Valentina and Peter, Silke and Pightling, Arthur and Raffael, Barbara and Rossen, John and Rupp{\'{e}}, Etienne and Schlaberg, Robert and Vanneste, Kevin and Weber, Lukas M. and Westh, Henrik and Angers-Loustau, Alexandre},
  title    = {A roadmap for the generation of benchmarking resources for antimicrobial resistance detection using next generation sequencing [version 1; peer review: 2 approved with reservations]},
  journal  = {F1000Research},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:526-opus4-15699},
  year     = {2021},
  abstract = {Next Generation Sequencing technologies significantly impact the field of Antimicrobial Resistance (AMR) detection and monitoring, with immediate uses in diagnosis and risk assessment. For this application and in general, considerable challenges remain in demonstrating sufficient trust to act upon the meaningful information produced from raw data, partly because of the reliance on bioinformatics pipelines, which can produce different results and therefore lead to different interpretations. With the constant evolution of the field, it is difficult to identify, harmonise and recommend specific methods for large-scale implementations over time. In this article, we propose to address this challenge through establishing a transparent, performance-based, evaluation approach to provide flexibility in the bioinformatics tools of choice, while demonstrating proficiency in meeting common performance standards. The approach is two-fold: first, a community-driven effort to establish and maintain "live" (dynamic) benchmarking platforms to provide relevant performance metrics, based on different use-cases, that would evolve together with the AMR field; second, agreed and defined datasets to allow the pipelines' implementation, validation, and quality-control over time. Following previous discussions on the main challenges linked to this approach, we provide concrete recommendations and future steps, related to different aspects of the design of benchmarks, such as the selection and the characteristics of the datasets (quality, choice of pathogens and resistances, etc.), the evaluation criteria of the pipelines, and the way these resources should be deployed in the community.},
  language = {en}
}