@misc{LuisZhuCostaetal.,
  author = {Luis, Leonel and Zhu, Hong and Costa, Jo{\~a}o and Valls-Sol{\'e}, Josep and Brandt, Thomas and Zhou, Wu and Schneider, Erich},
  title = {Reply to the commentary on {L}uis et al. "{S}pontaneous plugging of the horizontal semicircular canal with reversible canal dysfunction and recovery of vestibular evoked myogenic potentials"},
  series = {Otology \& Neurotology},
  volume = {35},
  journal = {Otology \& Neurotology},
  number = {2},
  issn = {1531-7129},
  pages = {379 -- 383},
  language = {en}
}

@misc{YiWuXietal.,
  author = {Yi, Chao and Wu, Shunxiang and Xi, Bin and Ming, Daodong and Zhang, Yisong and Zhou, Zhenwen},
  title = {Terrorist Video Detection System Based on Faster R-CNN and LightGBM},
  series = {CSAE '20: Proceedings of the 4th International Conference on Computer Science and Application Engineering},
  journal = {CSAE '20: Proceedings of the 4th International Conference on Computer Science and Application Engineering},
  isbn = {978-1-4503-7772-0},
  doi = {10.1145/3424978.3425121},
  pages = {1 -- 8},
  abstract = {Nowadays the mobile phone has become an indispensable tool in the lives of many people. While facilitating people's lives, it also provides criminals with a very important tool for spreading terrorist videos. Traditional manual detection of terrorist videos suffers from low accuracy and inefficiency. To address this issue, this paper proposes a terrorist video detection system based on the Light Gradient Boosting Machine (LightGBM) and the Faster Region-based Convolutional Neural Network (Faster R-CNN) for a mobile phone forensics system, which is used to quickly detect whether there is a terrorist video on a suspect's mobile phone. The system uses a multi-model detection method comprising two stages: preliminary detection and deep detection. Experimental research shows that the system can effectively and accurately detect terrorist videos on mobile phones, thereby helping criminal investigation personnel to quickly secure criminal evidence and providing clues for the investigation of the case.},
  language = {en}
}

@misc{WenShaikhSteueretal.,
  author = {Wen, Shuyu and Shaikh, Mohd Saif and Steuer, Oliver and Prucnal, Slawomir and Grenzer, J{\"o}rg and H{\"u}bner, Ren{\'e} and Turek, Marcin and Pyszniak, Krzysztof and Reiter, Sebastian and Fischer, Inga Anita and Georgiev, Yordan M. and Helm, Manfred and Wu, Shaoteng and Luo, Jun-Wei and Zhou, Shengqiang and Berenc{\'e}n, Yonder},
  title = {Room-temperature extended short-wave infrared GeSn photodetectors realized by ion beam techniques},
  series = {Applied Physics Letters},
  volume = {123},
  journal = {Applied Physics Letters},
  number = {8},
  issn = {0003-6951},
  doi = {10.1063/5.0166799},
  pages = {1 -- 7},
  abstract = {GeSn alloys hold great promise as high-performance, low-cost, near- and short-wavelength infrared photodetectors with the potential to replace the relatively expensive and currently market-dominant InGaAs- and InSb-based photodetectors. In this Letter, we demonstrate room-temperature GeSn pn photodetectors fabricated by a complementary metal-oxide-semiconductor compatible process, involving Sn and P ion implantation and flash-lamp annealing prior to device fabrication. The fabrication process enables the alloying of Ge with Sn at concentrations up to 4.5\% while maintaining the high-quality single-crystalline structure of the material.
This allows us to create Ge$_{0.955}$Sn$_{0.045}$ pn photodetectors with a low dark current density of 12.8 mA/cm$^2$ and a relatively high extended responsivity of 0.56 A/W at 1.71 $\mu$m. These results pave the way for the implementation of a cost-effective, scalable, and CMOS-compatible short-wavelength infrared detector technology.},
  language = {en}
}

@misc{BertoHuaParketal.,
  author = {Berto, Federico and Hua, Chuanbo and Park, Junyoung and Luttmann, Laurin and Ma, Yining and Bu, Fanchen and Wang, Jiarui and Ye, Haoran and Kim, Minsu and Choi, Sanghyeok and Zepeda, Nayeli Gast and Hottung, Andr{\'e} and Zhou, Jianan and Bi, Jieyi and Hu, Yu and Liu, Fei and Kim, Hyeonah and Son, Jiwoo and Kim, Haeyeon and Angioni, Davide and Kool, Wouter and Cao, Zhiguang and Zhang, Qingfu and Kim, Joungho and Zhang, Jie and Shin, Kijung and Wu, Cathy and Ahn, Sungsoo and Song, Guojie and Kwon, Changhyun and Tierney, Kevin and Xie, Lin and Park, Jinkyoo},
  title = {RL4CO: an extensive reinforcement learning for combinatorial optimization benchmark},
  series = {KDD '25: proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2},
  journal = {KDD '25: proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2},
  publisher = {ACM},
  address = {New York, NY, USA},
  isbn = {979-8-4007-1454-2},
  doi = {10.1145/3711896.3737433},
  pages = {5278 -- 5289},
  abstract = {Combinatorial optimization (CO) is fundamental to several real-world applications, from logistics and scheduling to hardware design and resource allocation. Deep reinforcement learning (RL) has recently shown significant benefits in solving CO problems, reducing reliance on domain expertise and improving computational efficiency. However, the absence of a unified benchmarking framework leads to inconsistent evaluations, limits reproducibility, and increases engineering overhead, raising barriers to adoption for new researchers. To address these challenges, we introduce RL4CO, a unified and extensive benchmark with in-depth library coverage of 27 CO problem environments and 23 state-of-the-art baselines. Built on efficient software libraries and best practices in implementation, RL4CO features modularized implementation and flexible configurations of diverse environments, policy architectures, RL algorithms, and utilities with extensive documentation. RL4CO helps researchers build on existing successes while exploring and developing their own designs, facilitating the entire research process by decoupling science from heavy engineering. We finally provide extensive benchmark studies to inspire new insights and future work. RL4CO has already attracted numerous researchers in the community and is open-sourced at https://github.com/ai4co/rl4co.},
  language = {en}
}