@article{TodtLameckerRammetal.2014,
  author = {Todt, Ingo and Lamecker, Hans and Ramm, Heiko and Frenzel, Henning and Wollenberg, Barbara and Beleites, Thomas and Zahnert, Thomas and Thomas, Jan Peter and Dazert, Stefan and Ernst, Arneborg},
  title = {Entwicklung eines CT-Daten-basierten Vibrant-Bonebridge-Viewers},
  volume = {62},
  journal = {HNO},
  publisher = {Springer},
  doi = {10.1007/s00106-014-2851-3},
  pages = {439 -- 442},
  year = {2014},
  abstract = {Positioning the B-FMT of the Vibrant Bonebridge can be problematic without prior assessment of the individual computed tomography (CT) scan of the temporal bone, owing to the anatomy of the mastoid and the size of the actuator. An easy-to-use viewer that allows the B-FMT to be positioned in a temporal bone model, flags individual potential anatomical conflicts, and offers possible solutions could be a helpful tool for preoperative positioning. The aim of this work was to define the requirements for, and to build a prototype of, a Vibrant Bonebridge viewer. The prototype was built on a version of the ZIBAmira software, incorporating a B-FMT model and generating a temporal bone model that allows conflicts to be assessed intuitively. Results: Segmentation of the individual DICOM ("digital imaging and communications in medicine") data takes about 5 min. Positioning within the individual 3D temporal bone model enables quantitative and qualitative assessment of conflicts (sigmoid sinus, middle cranial fossa) and identification of a preferred position. Raising the B-FMT with virtual washers can be simulated. The resulting Vibrant Bonebridge viewer reliably simulates B-FMT positioning; its clinical applicability remains to be evaluated.},
  language = {de}
}

@article{TodtLameckerRammetal.2014a,
  author = {Todt, Ingo and Lamecker, Hans and Ramm, Heiko and Ernst, Arneborg},
  title = {A computed tomographic data-based vibrant bonebridge visualization tool},
  volume = {15},
  journal = {Cochlear Implants International},
  number = {S1},
  doi = {10.1179/1467010014Z.000000000155},
  pages = {72 -- 74},
  year = {2014},
  abstract = {Information about temporal bone size and variations of anatomical structures is crucial for safe positioning of the Vibrant Bonebridge B-FMT. Radiologically based preoperative planning of the surgical procedure decreases surgical time and minimizes the risk of complications. We developed a software tool that imports external, DICOM-based CT temporal bone scans. The individual CT scan is transformed into a 3D reconstruction of the temporal bone, in which the individually preferred position of the B-FMT can be determined. The software allows a safe B-FMT position to be identified from its individual relation to the middle fossa, jugular bulb, and external auditory canal. Skull thickness and screw length are included as parameters for surgical planning.
    An easy-to-use software tool thus allows safe and fast, radiologically based surgical positioning of the B-FMT.},
  language = {en}
}

@inproceedings{EhlkeFrenzelRammetal.2015,
  author = {Ehlke, Moritz and Frenzel, Thomas and Ramm, Heiko and Shandiz, Mohsen Akbari and Anglin, Carolyn and Zachow, Stefan},
  title = {Towards Robust Measurement of Pelvic Parameters from AP Radiographs Using Articulated 3D Models},
  booktitle = {Computer Assisted Radiology and Surgery (CARS)},
  year = {2015},
  abstract = {Patient-specific parameters such as the orientation of the acetabulum or pelvic tilt are useful for custom planning of total hip arthroplasty (THA) and for evaluating the outcome of surgical interventions. The gold standard for obtaining pelvic parameters is three-dimensional (3D) computed tomography (CT) imaging. However, this adds time and cost, exposes the patient to a substantial radiation dose, and does not allow for imaging under load (e.g., while the patient is standing). If pelvic parameters could be reliably derived from the standard anteroposterior (AP) radiograph, preoperative planning would be more widespread, and research analyses could be applied to retrospective data after a postoperative issue is discovered. The goal of this work is to enable robust measurement of two surgical parameters of interest: the tilt of the anterior pelvic plane (APP) and the orientation of the natural acetabulum. We present a computer-aided reconstruction method to determine the APP and natural acetabular orientation from a single preoperative X-ray. It can easily be extended to obtain other important preoperative and postoperative parameters solely based on a single AP radiograph.},
  language = {en}
}

@article{ZahnGrotjohannRammetal.2015,
  author = {Zahn, Robert and Grotjohann, Sarah and Ramm, Heiko and Zachow, Stefan and Putzier, Michael and Perka, Carsten and Tohtz, Stephan},
  title = {Pelvic tilt compensates for increased acetabular anteversion},
  volume = {40},
  journal = {International Orthopaedics},
  number = {8},
  doi = {10.1007/s00264-015-2949-6},
  pages = {1571 -- 1575},
  year = {2015},
  abstract = {Pelvic tilt determines the functional orientation of the acetabulum. In this study, we investigated the interaction of pelvic tilt and functional acetabular anteversion (AA) in the supine position.},
  language = {en}
}

@article{ZahnGrotjohannRammetal.2016,
  author = {Zahn, Robert and Grotjohann, Sarah and Ramm, Heiko and Zachow, Stefan and Pumberger, Matthias and Putzier, Michael and Perka, Carsten and Tohtz, Stephan},
  title = {Influence of pelvic tilt on functional acetabular orientation},
  volume = {25},
  journal = {Technology and Health Care},
  number = {3},
  publisher = {IOS Press},
  doi = {10.3233/THC-161281},
  pages = {557 -- 565},
  year = {2016},
  language = {en}
}

@article{LiPimentelSzengeletal.2021,
  author = {Li, Jianning and Pimentel, Pedro and Szengel, Angelika and Ehlke, Moritz and Lamecker, Hans and Zachow, Stefan and Estacio, Laura and Doenitz, Christian and Ramm, Heiko and Shi, Haochen and Chen, Xiaojun and Matzkin, Franco and Newcombe, Virginia and Ferrante, Enzo and Jin, Yuan and Ellis, David G. and Aizenberg, Michele R. and Kodym, Oldrich and Spanel, Michal and Herout, Adam and Mainprize, James G. and Fishman, Zachary and Hardisty, Michael R.
    and Bayat, Amirhossein and Shit, Suprosanna and Wang, Bomin and Liu, Zhi and Eder, Matthias and Pepe, Antonio and Gsaxner, Christina and Alves, Victor and Zefferer, Ulrike and von Campe, Cord and Pistracher, Karin and Sch{\"a}fer, Ute and Schmalstieg, Dieter and Menze, Bjoern H. and Glocker, Ben and Egger, Jan},
  title = {AutoImplant 2020 - First MICCAI Challenge on Automatic Cranial Implant Design},
  volume = {40},
  journal = {IEEE Transactions on Medical Imaging},
  number = {9},
  issn = {0278-0062},
  doi = {10.1109/TMI.2021.3077047},
  pages = {2329 -- 2342},
  year = {2021},
  abstract = {The aim of this paper is to provide a comprehensive overview of the MICCAI 2020 AutoImplant Challenge. The approaches and publications submitted and accepted within the challenge will be summarized and reported, highlighting common algorithmic trends and algorithmic diversity. Furthermore, the evaluation results will be presented, compared, and discussed with regard to the challenge aim: seeking low-cost, fast, and fully automated solutions for cranial implant design. Based on feedback from collaborating neurosurgeons, this paper concludes by stating open issues and post-challenge requirements for intra-operative use.},
  language = {en}
}

@article{SekuboyinaHusseiniBayatetal.2021,
  author = {Sekuboyina, Anjany and Husseini, Malek E. and Bayat, Amirhossein and L{\"o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kukačka, Jan and Payer, Christian and Štern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol{\'a}s P{\'e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, {\'E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L{\^e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj{\"o}rn H. and Kirschke, Jan S.},
  title = {VerSe: A Vertebrae labelling and segmentation benchmark for multi-detector CT images},
  volume = {73},
  journal = {Medical Image Analysis},
  doi = {10.1016/j.media.2021.102166},
  year = {2021},
  abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging, predominantly due to considerable variations in anatomy and acquisition protocols and a severe shortage of publicly available data.
    Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared, and 4505 vertebrae were individually annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate performance variation at the vertebra level, at the scan level, and across different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in the data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe is that the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.},
  language = {en}
}