@article{WenningerBayerlSchmidtetal.2019, author = {Wenninger, Marc and Bayerl, Sebastian P. and Schmidt, Jochen and Riedhammer, Korbinian}, title = {Timage - A Robust Time Series Classification Pipeline}, series = {Artificial Neural Networks and Machine Learning - ICANN 2019: Text and Time Series. ICANN 2019. Lecture Notes in Computer Science}, volume = {11730}, journal = {Artificial Neural Networks and Machine Learning - ICANN 2019: Text and Time Series. ICANN 2019. Lecture Notes in Computer Science}, publisher = {Springer}, address = {Cham}, year = {2019}, abstract = {Time series are series of values ordered by time. This kind of data can be found in many real world settings. Classifying time series is a difficult task and an active area of research. This paper investigates the use of transfer learning in Deep Neural Networks and a 2D representation of time series known as Recurrence Plots. In order to utilize the research done in the area of image classification, where Deep Neural Networks have achieved very good results, we use a Residual Neural Networks architecture known as ResNet. As preprocessing of time series is a major part of every time series classification pipeline, the method proposed simplifies this step and requires only few parameters. For the first time we propose a method for multi time series classification: Training a single network to classify all datasets in the archive with one network. We are among the first to evaluate the method on the latest 2018 release of the UCR archive, a well established time series classification benchmarking dataset.}, language = {en} } @article{WenningerStecherSchmidt2019, author = {Wenninger, Marc and Stecher, Dominik and Schmidt, Jochen}, title = {SVM-Based Segmentation of Home Appliance Energy Measurements}, series = {Proceedings 8th IEEE International Conference on Machine Learning and Applications -ICMLA 2019}, journal = {Proceedings 8th IEEE International Conference on Machine Learning and Applications -ICMLA 2019}, pages = {1666 -- 1670}, year = {2019}, abstract = {Generating a more detailed understanding of domestic electricity demand is a major topic for energy suppliers and householders in times of climate change. Over the years there have been many studies on consumption feedback systems to inform householders, disaggregation algorithms for Non-Intrusive-Load-Monitoring (NILM), Real-Time-Pricing (RTP) to promote supply aware behavior through monetary incentives and appliance usage prediction algorithms. While these studies are vital steps towards energy awareness, one of the most fundamental challenges has not yet been tackled: Automated detection of start and stop of usage cycles of household appliances. We argue that most research efforts in this area will benefit from a reliable segmentation method to provide accurate usage information. We propose a SVM-based segmentation method for home appliances such as dishwashers and washing machines. 
The method is evaluated using manually annotated electricity measurements of five different appliances recorded over two years in multiple households.}, language = {en} } @book{Schmidt2019, author = {Schmidt, Jochen}, title = {Grundkurs Informatik - Das {\"U}bungsbuch: 148 Aufgaben mit L{\"o}sungen}, edition = {1}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3658259440}, pages = {196}, year = {2019}, language = {de} } @inproceedings{GoellerWenningerSchmidt2018, author = {Goeller, Toni and Wenninger, Marc and Schmidt, Jochen}, title = {Towards Cost-Effective Utility Business Models - Selecting a Communication Architecture for the Rollout of New Smart Energy Services}, series = {Proceedings of the 7th International Conference on Smart Cities and Green ICT Systems - Volume 1: SMARTGREENS}, booktitle = {Proceedings of the 7th International Conference on Smart Cities and Green ICT Systems - Volume 1: SMARTGREENS}, publisher = {SciTePress}, isbn = {978-989-758-292-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:861-opus4-8332}, pages = {231 -- 237}, year = {2018}, abstract = {The IT architecture for meter reading and utility services is at the core of new business models and has a decisive role as an enabler for resource efficiency measures. The communication architecture used by those services has significant impact on cost, flexibility and speed of new service rollout. This article describes how the dominant system model for meter reading came about, what alternative models exist, and what trade-offs those models have for rollout of new services by different stakeholders. Control of a self-learning home automation system by dynamic tariff information (Real-Time-Pricing) is given as an application example.}, language = {en} } @incollection{ArmengaudMacherMassoneretal.2018, author = {Armengaud, Erik and Macher, Georg and Massoner, Alexander and Frager, Sebastian and Adler, Rasmus and Schneider, Daniel and Longo, Simone and Melis, Massimiliano and Groppo, Riccardo and Villa, Federica and O'Leary, Padraig and Bambury, Kevin and Finnegan, Anita and Zeller, Marc and H{\"o}fig, Kai and Papadopoulos, Yiannis and Hawkins, Richard and Kelly, Tim}, title = {DEIS: Dependability Engineering Innovation for Industrial CPS}, series = {Advanced Microsystems for Automotive Applications 2017}, booktitle = {Advanced Microsystems for Automotive Applications 2017}, publisher = {Springer}, address = {Cham}, pages = {151 -- 163}, year = {2018}, abstract = {The open and cooperative nature of Cyber-Physical Systems (CPS) poses new challenges in assuring dependability. The DEIS project (Dependability Engineering Innovation for automotive CPS. This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 732242, see http://www.deis-project.eu) addresses these challenges by developing technologies that form a science of dependable system integration. In the core of these technologies lies the concept of a Digital Dependability Identity (DDI) of a component or system. DDIs are modular, composable, and executable in the field facilitating (a) efficient synthesis of component and system dependability information over the supply chain and (b) effective evaluation of this information in-the-field for safe and secure composition of highly distributed and autonomous CPS. 
The paper outlines the DDI concept and opportunities for application in four industrial use cases.}, language = {en} } @inproceedings{SchefflerGehdeSpaethetal.2017, author = {Scheffler, T. and Gehde, M. and Sp{\"a}th, M. and Karlinger, Peter}, title = {Nutzung von Werkzeug- und Maschinendaten zur Ermittlung des Fließverhaltens}, series = {Technomer Chemnitz}, booktitle = {Technomer Chemnitz}, year = {2017}, language = {de} } @inproceedings{WenningerSchmidtGoeller2017, author = {Wenninger, Marc and Schmidt, Jochen and Goeller, Toni}, title = {Appliance Usage Prediction for the Smart Home with an Application to Energy Demand Side Management - And Why Accuracy is not a Good Performance Metric for this Problem.}, series = {6th International Conference on Smart Cities and Green ICT Systems (SMARTGREENS)}, booktitle = {6th International Conference on Smart Cities and Green ICT Systems (SMARTGREENS)}, year = {2017}, abstract = {Shifting energy peak load is a subject that plays a huge role in the currently changing energy market, where renewable energy sources no longer produce the exact amount of energy demanded. Matching demand to supply requires behavior changes on the customer side, which can be achieved by incentives such as Real-Time-Pricing (RTP). Various studies show that such incentives cannot be utilized without a complexity reduction, e.g., by smart home automation systems that inform the customer about possible savings or automatically schedule appliances to off-peak load phases. We propose a probabilistic appliance usage prediction based on historical energy data that can be used to identify the times of day where an appliance will be used and therefore make load shift recommendations that suit the customer's usage profile. A huge issue is how to provide a valid performance evaluation for this particular problem. We will argue why the commonly used accuracy metric is not suitable, and suggest to use other metrics like the area under the Receiver Operating Characteristic (ROC) curve, Matthews Correlation Coefficient (MCC) or F1-Score instead.}, language = {en} } @inproceedings{MoehrleBizikZelleretal.2017, author = {M{\"o}hrle, Felix and Bizik, Kai and Zeller, Marc and H{\"o}fig, Kai and Rothfelder, Martin and Liggesmeyer, Peter}, title = {A Formal Approach for Automating Compositional Safety Analysis Using Flow Type Annotations In Component Fault Trees}, series = {Proceedings of the 27th European Safety and Reliability Conference (ESREL): Safety and Reliability - Theory and Applications., Portorož, Slovenia: Taylor \& Francis (CRC Press).}, booktitle = {Proceedings of the 27th European Safety and Reliability Conference (ESREL): Safety and Reliability - Theory and Applications., Portorož, Slovenia: Taylor \& Francis (CRC Press).}, year = {2017}, abstract = {Safety assurance is a major challenge in the design of modern embedded systems that has become increasingly difficult in recent years. Growing system sizes and the rise of Cyber-Physical systems confront safety engineers with large sets of configurations to be analyzed. Current approaches are usually carried out at design time and do not address the need for automated assessments in the field. With Component Fault Trees (CFTs) there exists a component-based methodology that enables an efficient modular composition of safety artifacts. The combined model is a system-level CFT that can be analyzed by means of popular Fault Tree Analysis techniques that are widely accepted in the industry. 
However, when composing models, their interfacing elements must be connected manually, which impedes the automation of the procedure. In this work, we introduce the notion of flow types that represent a particular kind of component interaction and define a taxonomy of related failure behavior. By annotating CFTs with types, a machine-readable vocabulary is provided that allows for an automated interconnection of their interfaces. This way, the automatic composition of models according to system architecture is enabled, allowing for automated safety assessments on system-level. We demonstrate the feasibility of our approach using an example ethylene vaporization unit.}, language = {en} } @inproceedings{ZellerHoefigSchwinn2017, author = {Zeller, Marc and H{\"o}fig, Kai and Schwinn, Jean-Pascal}, title = {ArChes—Automatic generation of component fault trees from continuous function charts}, series = {2017 IEEE 15th International Conference on Industrial Informatics (INDIN), July 2017, Emden, Germany.}, booktitle = {2017 IEEE 15th International Conference on Industrial Informatics (INDIN), July 2017, Emden, Germany.}, year = {2017}, abstract = {The growing size and complexity of software in embedded systems poses new challenges to the safety assessment of embedded control systems. In industrial practice, the control software is mostly treated as a black box during the system's safety analysis. The appropriate representation of the failure propagation of the software is a pressing need in order to increase the accuracy of safety analyses. However, it also increases the effort for creating and maintaining the safety analysis models (such as fault trees) significantly. In this work, we present a method to automatically generate Component Fault Trees from Continuous Function Charts. This method aims at generating the failure propagation model of the detailed software specification. Hence, control software can be included into safety analyses without additional manual effort required to construct the safety analysis models of the software. Moreover, safety analyses created during early system specification phases can be verified by comparing them with the automatically generated one in the detailed specification phase.}, language = {en} } @article{Frank2017, author = {Frank, Ludwig}, title = {The Binomial and Negative Binomial Distribution in Discrete Time Markov Chains}, series = {Markov Processes And Related Fields}, volume = {23}, journal = {Markov Processes And Related Fields}, number = {3}, pages = {377 -- 400}, year = {2017}, abstract = {The classical results of the binomial and negative binomial probability distribution are generalized by means of homogeneous Discrete Time Markov Chains to series of stochastically independent random trials. These have not only two possible outcomes but two groups of them -- different kinds of successes and failures with occurrence probabilities depending on the outcome of the previous trial. This generalization allows a uniform view of occupation time, first passage time and recurrence time. Our results are consequently derived and presented in matrix form, the probabilities as well as the moments. 
They can be applied to all Discrete Time Markov Chains, especially in computer capacity planning, performability and economics.}, language = {en} } @misc{Beneken2016, author = {Beneken, Gerd}, title = {Agile Softwareentwicklung und -Architektur (Fachposter)}, editor = {Sigs-Datacom Verlag,}, url = {http://www.sigs-datacom.de/fachzeitschriften/agile-sw-entwicklung-und-architektur-poster-2016.html}, year = {2016}, abstract = {Fachposter zur agilen Softwareentwicklung und -Architektur}, language = {de} } @book{Petkovic2016, author = {Petkovic, Dusan}, title = {Microsoft SQL Server 2016: A Beginner's Guide}, edition = {6}, publisher = {McGraw-Hill Osborne Media}, address = {Columbus, Ohio}, isbn = {978-1-25-964179-4}, year = {2016}, abstract = {Get up and running on Microsoft SQL Server 2016 in no time with help from this thoroughly revised, practical resource. The book offers thorough coverage of SQL management and development and features full details on the newest business intelligence, reporting, and security features. Filled with new real-world examples and hands-on exercises, Microsoft SQL Server 2016: A Beginner's Guide, Sixth Edition, starts by explaining fundamental relational database system concepts. From there, you will learn how to write Transact-SQL statements, execute simple and complex database queries, handle system administration and security, and use the powerful analysis and BI tools. XML, spatial data, and full-text search are also covered in this step-by-step tutorial. · Revised from the ground up to cover the latest version of SQL Server · Ideal both as a self-study guide and a classroom textbook · Written by a prominent professor and best-selling author}, language = {en} } @inproceedings{MoehrleZellerHoefigetal.2016, author = {M{\"o}hrle, Felix and Zeller, Marc and H{\"o}fig, Kai and Rothfelder, Martin and Liggesmeyer, Peter}, title = {Automating compositional safety analysis using a failure type taxonomy for component fault trees}, series = {Risk, Reliability and Safety: Innovating Theory and Practice: Proc. of ESREL}, booktitle = {Risk, Reliability and Safety: Innovating Theory and Practice: Proc. of ESREL}, pages = {1380 -- 1387}, year = {2016}, abstract = {Safety assurance is a major challenge in the design of today's complex embedded systems and future Cyber-physical systems. Changes in a system's architectural design invalidate former safety analyses and require a manual adaptation of related safety analysis models in order to restore consistency. In this work, we present an approach for automating the compositional assembly of Component Fault Trees by automatically generating mappings between their input and output failure modes. Therefore, we propose a taxonomy of failure types for annotating model elements and deriving a model of the failure propagation. This way, automatic and system-wide safety analyses can be executed and easily repeated after making modifications to the system's architecture. 
We demonstrate the feasibility of our approach using an example ethylene vaporization unit from an industrial domain.}, language = {en} } @inproceedings{ZellerHoefig2016, author = {Zeller, Marc and H{\"o}fig, Kai}, title = {INSiDER: Incorporation of system and safety analysis models using a dedicated reference model}, series = {2016 Annual Reliability and Maintainability Symposium (RAMS)}, booktitle = {2016 Annual Reliability and Maintainability Symposium (RAMS)}, pages = {1 -- 6}, year = {2016}, abstract = {In order to enable model-based, iterative design of safety-relevant systems, an efficient incorporation of safety and system engineering is a pressing need. Our approach interconnects system design and safety analysis models efficiently using a dedicated reference model. Since all information is available in a structured way, traceability between the model elements and consistency checks enable automated synchronization to guarantee that information within both kinds of models is consistent during the development life-cycle.}, language = {en} } @misc{Hoefig2016a, author = {H{\"o}fig, Kai}, title = {Automated Recertification of a Safety Critical System}, year = {2016}, abstract = {A method for automated recertification of a safety critical system with at least one altered functionality is provided. The method includes providing a failure propagation model of the safety critical system. The method also includes updating the failure propagation model of the safety critical system according to the at least one altered functionality using inner port dependency traces between inports and outports of a failure propagation model element representing the at least one altered functionality. The method includes calculating top events of the updated failure propagation model, and comparing the calculated top events with predetermined system requirements to recertify the safety critical system.}, language = {en} } @misc{Hoefig2016b, author = {H{\"o}fig, Kai}, title = {Analyzing the availability of a system}, year = {2016}, abstract = {An apparatus and method for analyzing availability of a system including subsystems each having at least one failure mode with a corresponding failure effect on the system are provided. The apparatus includes a degraded mode tree generation unit configured to automatically generate a degraded mode tree. The degraded mode tree includes at least one degraded mode element representing a degraded system state of the system that deviates from a normal operation state of the system based on a predetermined generic system meta model stored in a database including Failure Mode and Effects Analysis elements representing subsystems, failure modes, failure effects, and diagnostic measures. The apparatus also includes a processor configured to evaluate the generated degraded mode tree for calculation of the availability of the system.}, language = {en} } @misc{HoefigZeller2016, author = {H{\"o}fig, Kai and Zeller, Marc}, title = {Automated Qualification of a Safety Critical System}, year = {2016}, abstract = {A method for automated qualification of a safety critical system including a plurality of components is provided. A functional safety behavior of each component is represented by an associated component fault tree element. 
The method includes automatically performing a failure port mapping of output failure modes to input failure modes of component fault tree elements based on a predetermined generic fault type data model stored in a database.}, language = {en} } @inproceedings{FariaRiedhammerJaninetal.2016, author = {Faria, Arlo and Riedhammer, Korbinian and Janin, Adam and Bauer, A.}, title = {REMEETING: Searchable Conversations}, series = {INTERSPEECH 2016, Annual Conference of the International Speech Communication Association (ISCA), IEEE Workshop on Spoken Language Technologies (SLT), San Francisco, USA, September 2016.}, booktitle = {INTERSPEECH 2016, Annual Conference of the International Speech Communication Association (ISCA), IEEE Workshop on Spoken Language Technologies (SLT), San Francisco, USA, September 2016.}, organization = {2016 ISCA}, year = {2016}, language = {en} } @book{ErnstSchmidtBeneken2016, author = {Ernst, H. and Schmidt, Jochen and Beneken, Gerd}, title = {Grundkurs Informatik: Grundlagen und Konzepte f{\"u}r die erfolgreiche IT-Praxis - Eine umfassende, praxisorientierte Einf{\"u}hrung}, publisher = {Springer Vieweg}, address = {Berlin}, year = {2016}, abstract = {Zahlensysteme und bin{\"a}re Arithmetik Nachricht und Information Codierung und Datenkompression Verschl{\"u}sselung Schaltalgebra, Schaltnetze und Elemente der Computerhardware Rechnerarchitekturen Rechnernetze Betriebssysteme Datenbanken Automatentheorie und formale Sprachen Berechenbarkeit und Komplexit{\"a}t Suchen und Sortieren B{\"a}ume und Graphen prozedurale und objektorientierte Programmierung (C und Java) Anwendungsprogrammierung im Internet (HTML, CSS, JavaScript und PHP) Software-Engineering}, language = {de} } @book{BenekenErnstSchmidt2015, author = {Beneken, Gerd and Ernst, H. and Schmidt, Jochen}, title = {Grundkurs Informatik: Grundlagen und Konzepte f{\"u}r die erfolgreiche IT-Praxis - Eine umfassende, praxisorientierte Einf{\"u}hrung (Auflage von 2015)}, publisher = {Springer Vieweg}, address = {Berlin}, year = {2015}, abstract = {Das Buch bietet eine umfassende und praxisorientierte Einf{\"u}hrung in die wesentlichen Grundlagen und Konzepte der Informatik. Es umfasst den Stoff, der typischerweise in den ersten Semestern eines Informatikstudiums vermittelt wird, vertieft Zusammenh{\"a}nge, die dar{\"u}ber hinausgehen und macht sie verst{\"a}ndlich. Die Themenauswahl orientiert sich an der langfristigen Relevanz f{\"u}r die praktische Anwendung. Praxisnah und aktuell werden die Inhalte f{\"u}r Studierende der Informatik und verwandter Studieng{\"a}nge sowie f{\"u}r im Beruf stehende Praktiker vermittelt. Die vorliegende f{\"u}nfte Auflage wurde grundlegend {\"u}berarbeitet und aktualisiert.}, language = {de} } @misc{Beneken2015a, author = {Beneken, Gerd}, title = {PHP ist nur was f{\"u}r Hackerbuden? 
{\"U}ber die Bedeutung von PHP im Informatikstudium",}, editor = {Software \& Support Media GmbH,}, year = {2015}, abstract = {Vortrag zum Thema PHP im Informatikstudium}, language = {de} } @misc{Beneken2015, author = {Beneken, Gerd}, title = {Fail Better: Erfahrungen aus 12 Jahren Requirements Engineering in studentischen Projekten"}, editor = {Gesellschaft f{\"u}r Informatik,}, year = {2015}, abstract = {Vortrag zum Thema Requirements Engineering in studentischen Projekten}, language = {de} } @misc{Beneken2015, author = {Beneken, Gerd}, title = {Agiles Projektmanagement}, editor = {Sigs- Datacom Verlag,}, year = {2015}, abstract = {Fachposter zum Thema Agiles Projektmanagement}, language = {de} } @inproceedings{HoefigZellerHeilmann2015, author = {H{\"o}fig, Kai and Zeller, Marc and Heilmann, Reiner}, title = {ALFRED: a methodology to enable component fault trees for layered architectures}, series = {2015 41st Euromicro Conference on Software Engineering and Advanced Applications (SEAA), August 2015,Funchal, Portugal. IEEE.}, booktitle = {2015 41st Euromicro Conference on Software Engineering and Advanced Applications (SEAA), August 2015,Funchal, Portugal. IEEE.}, pages = {167 -- 176}, year = {2015}, abstract = {Identifying drawbacks or insufficiencies in terms of safety is important also in early development stages of safety critical systems. In industry, development artefacts such as components or units, are often reused from existing artefacts to save time and costs. When development artefacts are reused, their existing safety analysis models are an important input for an early safety assessment for the new system, since they already provide a valid model. Component fault trees support such reuse strategies by a compositional horizontal approach. But current development strategies do not only divide systems horizontally, e.g., By encapsulating different functionality into separate components and hierarchies of components, but also vertically, e.g. Into software and hardware architecture layers. Current safety analysis methodologies, such as component fault trees, do not support such vertical layers. Therefore, we present here a methodology that is able to divide safety analysis models into different layers of a systems architecture. We use so called Architecture Layer Failure Dependencies to enable component fault trees on different layers of an architecture. These dependencies are then used to generate safety evidence for the entire system and over all different architecture layers. A case study applies the approach to hardware and software layers.}, language = {en} } @inproceedings{HoefigZellerSchorp2015, author = {H{\"o}fig, Kai and Zeller, Marc and Schorp, Konstantin}, title = {Automated failure propagation using inner port dependency traces}, series = {2015 11th International ACM SIGSOFT Conference on Quality of Software Architectures (QoSA), Mai 2015, Montreal, QC, Canada.}, booktitle = {2015 11th International ACM SIGSOFT Conference on Quality of Software Architectures (QoSA), Mai 2015, Montreal, QC, Canada.}, pages = {123 -- 128}, year = {2015}, abstract = {Safety assurance is a major challenge in the design of complex embedded and Cyber-physical Systems. Especially, changes and adoptions during the design or run-time of an embedded system invalidate former safety analyses and require an adaptation of the system's safety analysis models. 
In this paper, we present a methodology to fill up empty safety analysis artifacts in component fault trees using so-called inner port dependency traces to describe failure propagation. This enables an imprecise but rapid safety analysis of an entire system at early development stages or during system run-time for the automated certification of Cyber-physical Systems. We evaluate our approach using a case study from the automotive domain.}, language = {en} } @inproceedings{MoehrleZellerHoefigetal.2015, author = {M{\"o}hrle, Felix and Zeller, Marc and H{\"o}fig, Kai and Rothfelder, Martin and Liggesmeyer, Peter}, title = {Automated compositional safety analysis using component fault trees}, series = {Proceedings of the IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW 2015), November 2015, Gaithersburg, MD.}, booktitle = {Proceedings of the IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW 2015), November 2015, Gaithersburg, MD.}, pages = {152 -- 159}, year = {2015}, abstract = {Safety assurance is a major challenge in the design of today's complex embedded systems and future Cyber-physical systems. Especially changes in a system's architectural design invalidate former safety analyses and require an adaptation of related safety analysis models in order to restore consistency. In this work, we present an approach for automatically generating mappings between failure ports in compositional safety analysis models. This way, automatic and system-wide safety analyses are enabled that can be easily repeated after making modifications to the system's architecture. We demonstrate the feasibility of our approach using a case study from the automotive domain.}, language = {en} } @misc{GuoHoefig2015, author = {Guo, Zhensheng and H{\"o}fig, Kai}, title = {Integrated Model-Based Safety Analysis}, year = {2015}, abstract = {A method for integrated model-based safety analysis includes integrating a safety analysis model into a system development model of a safety-critical system. The system development model includes model components. The safety analysis model models a failure logic separately for each of the model components. The method includes representing dependencies among the model components with a design structure matrix. The design structure matrix represents each of the model components with a row and a column and shows dependencies between model components with corresponding entries. The method also includes sequencing the design structure matrix, and identifying at least one dependency loop and loop components in the sequenced design structure matrix. The loop components are part of the at least one dependency loop.}, language = {en} } @inproceedings{FariaRiedhammer2015, author = {Faria, Arlo and Riedhammer, Korbinian}, title = {REMEETING — Get More Out Of Meetings}, series = {INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 2015.}, booktitle = {INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 2015.}, year = {2015}, abstract = {Remeeting is a tool that helps you get more out of in-person meetings. Calendar integration and a special email address allow users to email agenda items prior to a certain meeting. A discrete notification at the time of the meeting reminds the user to start the recording. 
During the meeting, the user focuses on the conversation, or can add notes and photos if desired. After the meeting, every participant gets notified by an automated email that lists the participants along with automatically extracted keywords, notes and photos. This stimulates collaboration, and keeps follow-up contributions at a central place: Just reply to add further notes to the meeting. The resulting meeting "document" can be shared with others and reviewed using a web app that acts as a visual index to the meeting. This makes Remeeting the perfect tool for regular group meetings, standups and interviews, where people typically track progress and follow up. Remeeting is leveraging, promoting and contributing to open source projects including Kaldi and Docker.}, language = {en} } @misc{Beneken2014, author = {Beneken, Gerd}, title = {Agiles Requirements Engineering}, editor = {Sigs-Datacom Verlag,}, year = {2014}, abstract = {Fachposter zum Thema Agiles Requirements Engineering}, language = {de} } @inproceedings{Jarz2014, author = {Jarz, Ewald}, title = {Review of the Austrian and German E-Governance Concept}, series = {VI-th International Scientific Conference "E-GOVERNANCE" jointly with the "Science Days - 2014" of TU-Sofia, 2014}, booktitle = {VI-th International Scientific Conference "E-GOVERNANCE" jointly with the "Science Days - 2014" of TU-Sofia, 2014}, editor = {TUS,}, pages = {16}, year = {2014}, language = {en} } @inproceedings{HoefigArmbrusterSchmidt2014, author = {H{\"o}fig, Kai and Armbruster, Michael and Schmidt, Reiner}, title = {A vehicle control platform as safety element out of context.}, series = {HiPEAC Computing Systems Week, Barcelona, Spain, May 2014.}, booktitle = {HiPEAC Computing Systems Week, Barcelona, Spain, May 2014.}, year = {2014}, language = {en} } @inproceedings{RiedhammerBockletOrozcoArroyaveetal.2014, author = {Riedhammer, Korbinian and Bocklet, Tobias and Orozco-Arroyave, Juan Rafael and N{\"o}th, Elmar}, title = {Semi-Automatic Calibration for Dereverberation by Spectral Subtraction for Continuous Speech Recognition}, series = {ITG Symposium on Speech Communication 2014, Erlangen.}, booktitle = {ITG Symposium on Speech Communication 2014, Erlangen.}, publisher = {VDE VERLAG GMBH}, year = {2014}, abstract = {In this article, we describe a semi-automatic calibration algorithm for dereverberation by spectral subtraction. We verify the method by a comparison to a manual calibration derived from measured room impulse responses (RIR). We conduct extensive experiments to understand the effect of all involved parameters and to verify values suggested in the literature. The experiments are performed on a text read by 31 speakers and recorded by a headset and three far-field microphones. Results are measured in terms of automatic speech recognition (ASR) performance using a 1-gram model to emphasize acoustic recognition performance. To accommodate for the acoustic change by dereverberation we apply supervised MAP adaptation to the hidden Markov model output probabilities. 
The combination of dereverberation and adaptation yields a relative improvement of about 35\% in terms of word error rate (WER) compared to the original signal.}, language = {en} } @inproceedings{GhahremaniBabaAliPoveyetal.2014, author = {Ghahremani, Pegah and BabaAli, Bagher and Povey, Daniel and Riedhammer, Korbinian and Trmal, Jan and Khudanpur, Sanjeev}, title = {A Pitch Extraction Algorithm Tuned for Automatic Speech Recognition}, series = {2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence, Italy, May 2014.}, booktitle = {2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence, Italy, May 2014.}, publisher = {IEEE}, year = {2014}, abstract = {In this paper we present an algorithm that produces pitch and probability-of-voicing estimates for use as features in automatic speech recognition systems. These features give large performance improvements on tonal languages for ASR systems, and even substantial improvements for non-tonal languages. Our method, which we are calling the Kaldi pitch tracker (because we are adding it to the Kaldi ASR toolkit), is a highly modified version of the getf0 (RAPT) algorithm. Unlike the original getf0 we do not make a hard decision whether any given frame is voiced or unvoiced; instead, we assign a pitch even to unvoiced frames while constraining the pitch trajectory to be continuous. Our algorithm also produces a quantity that can be used as a probability of voicing measure; it is based on the normalized autocorrelation measure that our pitch extractor uses. We present results on data from various languages in the BABEL project, and show a large improvement over systems without tonal features and systems where pitch and POV information was obtained from SAcC or getf0.}, language = {en} } @inproceedings{BockletMaierRiedhammeretal.2014, author = {Bocklet, Tobias and Maier, Andreas and Riedhammer, Korbinian and Eysholdt, Ulrich and N{\"o}th, Elmar}, title = {Erlangen-CLP: A Large Annotated Corpus of Speech from Children with Cleft Lip and Palate.}, series = {Language Resources and Evaluation Conference (LREC), Reykjavik, Iceland, May 2014.}, booktitle = {Language Resources and Evaluation Conference (LREC), Reykjavik, Iceland, May 2014.}, year = {2014}, abstract = {In this paper we describe Erlangen-CLP, a large speech database of children with Cleft Lip and Palate. More than 800 German children with CLP (most of them between 4 and 18 years old) and 380 age matched control speakers spoke the semi-standardized PLAKSS test that consists of words with all German phonemes in different positions. So far 250 CLP speakers were manually transcribed, 120 of these were analyzed by a speech therapist and 27 of them by four additional therapists. The therapists marked 6 different processes/criteria like pharyngeal backing and hypernasality which typically occur in speech of people with CLP. 
We present detailed statistics about the marked processes and the inter-rater agreement.}, language = {en} } @inproceedings{FariaRiedhammer2014, author = {Faria, Arlo and Riedhammer, Korbinian}, title = {REMEETING — Get More Out of Meetings}, series = {INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, IEEE Workshop on Spoken Language Technologies (SLT), Singapore, September 2014.}, booktitle = {INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, IEEE Workshop on Spoken Language Technologies (SLT), Singapore, September 2014.}, year = {2014}, language = {en} } @book{HammerschallBeneken2013, author = {Hammerschall, Ulrike and Beneken, Gerd}, title = {Software Requirements (Pearson Studium - IT)}, publisher = {Pearson Studium}, address = {Hallbergmoos}, year = {2013}, abstract = {Das Lehrbuch Software Requirements von Ulrike Hammerschall und Gerd Beneken f{\"u}hrt in die Grundkonzepte des Requirements Engineering ein und zeigt anhand vieler anschaulicher Beispiele, wie man systematisch und methodisch bei der Ermittlung, Dokumentation, Spezifikation, Modellierung, Validierung und Verwaltung von Software Requirements vorgeht. Mit seinem Inhalt und didaktisch wertvollem Aufbau richtet sich das Buch an Studierende der Fachrichtung Informatik und Wirtschaftsinformatik, sowie aller verwandten Fachrichtungen, die sich mit den Themen Software Engineering oder Requirements Engineering besch{\"a}ftigen. Software Requirements sind die Anforderungen der Anwender an die Funktionalit{\"a}t eines geplanten Software-Systems. Requirements Engineering ist der Prozess zur methodischen Erhebung und Beschreibung der Anforderungen. Die Kunst eines guten Requirements Engineerings ist die Entwicklung einer stabilen Anforderungsbasis als zuverl{\"a}ssige Grundlage f{\"u}r die weitere Entwicklung der Software. Das vorliegende Buch f{\"u}hrt in die Grundkonzepte des Requirements Engineering ein und zeigt anhand vieler Beispiele, wie man systematisch und methodisch bei der Ermittlung, Dokumentation, Spezifikation, Modellierung, Validierung und Verwaltung von Software Requirements vorgeht. Ausf{\"u}hrliche Methodenbeschreibungen dienen zur Erl{\"a}uterung und ein durchg{\"a}ngiges Fallbeispiel hilft dem Leser die Anwendung der Methoden nachzuvollziehen. Mit Hilfe der {\"U}bungen am Ende jedes Kapitels k{\"o}nnen die Methoden selbst einge{\"u}bt werden. Neben dem klassischen Dokument-getriebenen Requirements Engineering besch{\"a}ftigt sich das Buch mit den Methoden des agilen Requirements Engineering und vergleicht die beiden Ans{\"a}tze. Zus{\"a}tzlich bietet das Buch einen Blick {\"u}ber den Tellerrand und betrachtet die Schnittstellen des Requirements Engineerings zu anderen Teilprozessen im Entwicklungsprozess. Das Buch richtet sich an Studierende der Fachrichtung Informatik und Wirtschaftsinformatik, sowie aller verwandten Fachrichtungen, die sich mit den Themen Software Engineering oder Requirements Engineering besch{\"a}ftigen. - Der RE-Prozess, Vorgehen und Methodik. - Anforderungsermittlung, -dokumentation und -spezifikation. - Querschnittliche Aufgaben wie Validierung, Modellierung und Management von Anforderungen - Agiles RE, Vorgehen und Methodik. - Schnittstellen zu benachbarten Teilprozessen (Projektmanagement, Qualit{\"a}tsmanagement, Software-Architektur) sowie zum Usability Engineering. 
- Einf{\"u}hrung und Verbesserung des Requirements Engineering Prozesses in einer Organisation.}, language = {de} } @misc{Beneken2013, author = {Beneken, Gerd}, title = {Studienziel: Software Ingenieur(in), Studentische Projekte mit KMU}, editor = {BICCnet,}, year = {2013}, abstract = {Agenda: Studienziel Software-Ingenieur(in) � Projekte im Rahmen des Informatik-Studiums � Projektbeispiele -IT Partner in Forschungsprojekten Labor f{\"u}r Software-Technik -IT Partner in Forschungsprojekten -Einzelanfertigungen f{\"u}r genau einen Kunden -Unterst{\"u}tzung von Startups / Testen von Gesch{\"a}ftsideen -Projekte mit kleinen und mittleren Unternehmen � Zusammenarbeit mit FH: N{\"a}chste Schritte}, language = {de} } @inproceedings{JungHoefigDomisetal.2013, author = {Jung, Jessica and H{\"o}fig, Kai and Domis, Dominik and Jedlitschka, Andreas and Hiller, Martin}, title = {Experimental comparison of two safety analysis methods and its replication}, series = {2013 ACM / IEEE International Symposium on Empirical Software Engineering and Measurement, December 2013, Baltimore, MD, USA.}, booktitle = {2013 ACM / IEEE International Symposium on Empirical Software Engineering and Measurement, December 2013, Baltimore, MD, USA.}, isbn = {978-0-7695-5056-5}, pages = {223 -- 232}, year = {2013}, abstract = {(Background) Empirical Software Engineering (SE) strives to provide empirical evidence about the pros and cons of SE approaches. This kind of knowledge becomes relevant when the issue is whether to change from a currently employed approach to a new one or not. An informed decision is required and is particularly important in the development of safety-critical systems. For example, for the safety analysis of safety-critical embedded systems, methods such as Failure Mode and Effect Analysis (FMEA) and Fault Tree Analysis (FTA) are used. With the advent of model-based systems and software development, the question arises whether safety engineering methods should also be adopted. New technologies such as Component Integrated Fault Trees (CFT) come into play. Industry demands to know the benefits of these new methods over established ones such as Fault Trees (FT). (Methods) For the purpose of comparing CFT and FT with regard to the capabilities of the safety analysis methods (such as quality of the results) and to the participants' rating of the consistency, clarity, and maintainability of the methods, we designed a comparative study as a controlled experiment using a within-subject design. The experiment was run with seven academic staff members working towards their PhD. The study was replicated with eleven domain experts from industry. (Results) Although the analysis of the tasks' solutions showed that the use of CFT did not yield a significantly different number of correct or incorrect solutions, the participants rated the modeling capacities of CFT higher in terms of model consistency, clarity, and maintainability. 
(Conclusion) From this first evidence, we conclude that CFT have the potential of being beneficial for companies looking for a safety analysis approach for projects using model-based development.}, language = {en} } @inproceedings{JungJedlitschkaHoefigetal.2013, author = {Jung, Jessica and Jedlitschka, Andreas and H{\"o}fig, Kai and Domis, Dominik and Hiller, Martin}, title = {A controlled experiment on component fault trees}, series = {International Conference on Computer Safety, Reliability, and Security (SAFECOMP 2013)}, booktitle = {International Conference on Computer Safety, Reliability, and Security (SAFECOMP 2013)}, publisher = {Springer}, address = {Berlin, Heidelberg}, pages = {285 -- 292}, year = {2013}, abstract = {In safety analysis for safety-critical embedded systems, methods such as FMEA and fault trees (FT) are strongly established in practice. However, the current shift towards model-based development has resulted in various new safety analysis methods, such as Component Integrated Fault Trees (CFT). Industry demands to know the benefits of these new methods. To compare CFT to FT, we conducted a controlled experiment in which 18 participants from industry and academia had to apply each method to safety modeling tasks from the avionics domain. Although the analysis of the solutions showed that the use of CFT did not yield a significantly different number of correct or incorrect solutions, the participants subjectively rated the modeling capacities of CFT significantly higher in terms of model consistency, clarity, and maintainability. The results are promising for the potential of CFT as a model-based approach.}, language = {en} } @inproceedings{WegmannFariaJaninetal.2013, author = {Wegmann, Steven and Faria, Arlo and Janin, Adam and Riedhammer, Korbinian and Morgan, Nelson}, title = {The Tao of ATWV: Probing the mysteries of keyword search performance}, series = {IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU), Olomouc, Czech Republic, December 2013.}, booktitle = {IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU), Olomouc, Czech Republic, December 2013.}, year = {2013}, abstract = {In this paper we apply diagnostic analysis to gain a deeper understanding of the performance of the keyword search system that we have developed for conversational telephone speech in the IARPA Babel program. We summarize the Babel task, its primary performance metric, "actual term weighted value" (ATWV), and our recognition and keyword search systems. Our analysis uses two new oracle ATWV measures, a bootstrap-based ATWV confidence interval, and includes a study of the underpinnings of the large ATWV gains due to system combination. This analysis quantifies the potential ATWV gains from improving the number of true hits and the overall quality of the detection scores in our system's posting lists. 
It also shows that system combination improves our systems' ATWV via a small increase in the number of true hits in the posting lists.}, language = {en} } @inproceedings{RiedhammerGroppBockletetal.2013, author = {Riedhammer, Korbinian and Gropp, Martin and Bocklet, Tobias and H{\"o}nig, Florian and N{\"o}th, Elmar and Steidl, Stefan}, title = {LMELectures: A Multimedia Corpus of Academic Spoken English}, series = {First Workshop on Speech, Language and Audio in Multimedia (SLAM 2013), Marseille, France, August 2013, ISCA Archive.}, booktitle = {First Workshop on Speech, Language and Audio in Multimedia (SLAM 2013), Marseille, France, August 2013, ISCA Archive.}, pages = {102 -- 107}, year = {2013}, abstract = {This paper describes the acquisition, transcription and annotation of a multi-media corpus of academic spoken English, the LMELectures. It consists of two lecture series that were read in the summer term 2009 at the computer science department of the University of Erlangen-Nuremberg, covering topics in pattern analysis, machine learning and interventional medical image processing. In total, about 40 hours of high-definition audio and video of a single speaker was acquired in a constant recording environment. In addition to the recordings, the presentation slides are available in machine readable (PDF) format. The manual annotations include a suggested segmentation into speech turns and a complete manual transcription that was done using BLITZSCRIBE2, a new tool for the rapid transcription. For one lecture series, the lecturer assigned key words to each recording; one recording of that series was further annotated with a list of ranked key phrases by five human annotators each. The corpus is available for non-commercial purposes upon request.}, language = {en} } @inproceedings{RiedhammerHaiDoHieronymus2013, author = {Riedhammer, Korbinian and Hai Do, Van and Hieronymus, James}, title = {A Study on LVCSR and Keyword Search for Tagalog}, series = {INTERSPEECH 2013, 14th Annual Conference of the International Speech Communication Association (ISCA), Lyon, France, August 2013.}, booktitle = {INTERSPEECH 2013, 14th Annual Conference of the International Speech Communication Association (ISCA), Lyon, France, August 2013.}, year = {2013}, abstract = {We describe a state-of-the-art large vocabulary continuous speech recognition (LVCSR) and keyword search (KWS) system trained on roughly 70 hours of conversational telephone speech. Using the Kaldi speech recognition toolkit, we investigate several aspects: for the acoustic front-end, we analyze the use of mel-frequency cepstral coefficients (MFCC), pitch and probability-of-voicing (PoV), and deep neural network (DNN) bottleneck (BN) features, as well as their feature-level combination ("tandem"). For the acoustic-phonetic decision tree, we explore different hidden Markov model (HMM) topologies for the glottalization phoneme /ʔ/ to model its typically short duration. For the acoustic model, we compare regular continuous HMM with a sort of multi-codebook subspace Gaussian mixture model (SGMM) that lead to an overall best word error rate (WER) of 58.7\% and 56.3\%, respectively. 
The KWS is implemented as a word lattice search, and is augmented by a syllable lattice back-up search to capture out-of-vocabulary keywords as well as misrecognized lexical surface forms due to ambiguous prefix and hyphenation rules.}, language = {en} } @inproceedings{BockletRiedhammerEysholdtetal.2013, author = {Bocklet, Tobias and Riedhammer, Korbinian and Eysholdt, Ulrich and N{\"o}th, Elmar}, title = {Automatic Phoneme Analysis in Children with Cleft Lip and Palate}, series = {2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Vancouver, BC, Canada, May 2013.}, booktitle = {2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Vancouver, BC, Canada, May 2013.}, publisher = {IEEE}, pages = {7572 -- 7576}, year = {2013}, abstract = {Cleft Lip and Palate (CLP) is among the most frequent congenital abnormalities. The impaired facial development affects the articulation, with different phonemes being impacted inhomogeneously among different patients. This work focuses on automatic phoneme analysis of children with CLP for a detailed diagnosis and therapy control. In clinical routine, the state-of-the-art evaluation is based on perceptual evaluations. Perceptual ratings act as ground-truth throughout this work, with the goal to build an automatic system that is as reliable as humans. We propose two different automatic systems focusing on modeling the articulatory space of a speaker: one system models a speaker by a GMM, the other system employs a speech recognition system and estimates fMLLR matrices for each speaker. SVR is then used to predict the perceptual ratings. We show that the fMLLR-based system is able to achieve automatic phoneme evaluation results that are in the same range as perceptual inter-rater-agreements.}, language = {en} } @inproceedings{ZengerSchmidtKroedel2013, author = {Zenger, A. and Schmidt, Jochen and Kr{\"o}del, M.}, title = {Towards the Intelligent Home: Using Reinforcement-Learning for Optimal Heating Control}, series = {Advances in Artificial Intelligence, Proceedings of the 36th Annual Conference on AI, volume 8077 of Lecture Notes in Artificial Intelligence}, booktitle = {Advances in Artificial Intelligence, Proceedings of the 36th Annual Conference on AI, volume 8077 of Lecture Notes in Artificial Intelligence}, publisher = {Springer}, year = {2013}, abstract = {We propose a reinforcement learning approach to heating control in home automation that can acquire a set of rules enabling an agent to heat a room to the desired temperature at a defined time while conserving as much energy as possible. Experimental results are presented that show the feasibility of our method.}, language = {en} } @book{Petkovic2012, author = {Petkovic, Dusan}, title = {Microsoft SQL Server 2012: A Beginner's Guide}, edition = {5}, publisher = {McGraw-Hill Osborne Media}, address = {Columbus, Ohio}, isbn = {978-0-07-176160-4}, year = {2012}, abstract = {Get Started on Microsoft SQL Server 2012 in No Time. Learn to use all of the powerful features available in SQL Server 2012 quickly and easily. Microsoft SQL Server 2012: A Beginner's Guide explains the fundamentals of each topic alongside examples and tutorials that walk you through real-world database tasks. Install SQL Server 2012, construct high-performance databases, use powerful Transact-SQL statements, create stored procedures and triggers, and execute simple and complex database queries. 
Performance tuning, Database Engine security, Business Intelligence, and XML are also covered. Set up, configure, and maintain SQL Server 2012 · Build and manage database objects using Transact-SQL statements · Create stored procedures and user-defined functions · Optimize database performance, availability, and reliability · Implement solid security using authentication, encryption, and authorization · Automate tasks using SQL Server Agent · Create reliable data backups and perform flawless system restores · Use all-new SQL Server 2012 Business Intelligence, development, and administration tools · Learn in detail the SQL Server XML technology (SQLXML)}, language = {en} } @techreport{Jarz2012, type = {Working Paper}, author = {Jarz, Ewald}, title = {Skriptum Gesch{\"a}ftsprozesse}, pages = {72}, year = {2012}, abstract = {In diesem Skriptum sind die wesentlichen Methoden zur Modellierung, Gestaltung, Planung, Steuerung und Automatisierung von Gesch{\"a}ftsprozessen zusammengefasst und erl{\"a}utert.}, language = {de} } @inproceedings{Hoefig2012, author = {H{\"o}fig, Kai}, title = {Failure-dependent timing analysis - a new methodology for probabilistic worst-case execution time analysis}, series = {International GI/ITG Conference on Measurement, Modelling, and Evaluation of Computing Systems and Dependability and Fault Tolerance.}, booktitle = {International GI/ITG Conference on Measurement, Modelling, and Evaluation of Computing Systems and Dependability and Fault Tolerance.}, publisher = {Springer}, address = {Berlin, Heidelberg}, pages = {61 -- 75}, year = {2012}, abstract = {Embedded real-time systems are growing in complexity, which goes far beyond simplistic closed-loop functionality. Current approaches for worst-case execution time (WCET) analysis are used to verify the deadlines of such systems. These approaches calculate or measure the WCET as a single value that is expected as an upper bound for a system's execution time. Overestimations are taken into account to make this upper bound a safe bound, but modern processor architectures expand those overestimations into unrealistic areas. Therefore, we present in this paper how safety analysis model probabilities can be combined with elements of system development models to calculate a probabilistic WCET. This approach can be applied to systems that use mechanisms belonging to the area of fault tolerance, since such mechanisms are usually quantified using safety analyses to certify the system as being highly reliable or safe. A tool prototype implementing this approach is also presented which provides reliable safe upper bounds by performing a static WCET analysis and which overcomes the frequently encountered problem of dependence structures by using a fault injection approach.}, language = {en} } @inproceedings{RiedhammerGroppNoeth2012, author = {Riedhammer, Korbinian and Gropp, Martin and N{\"o}th, Elmar}, title = {The FAU Video Lecture Browser System}, series = {2012 IEEE Spoken Language Technology Workshop (SLT), Miami, FL, USA, December 2012.}, booktitle = {2012 IEEE Spoken Language Technology Workshop (SLT), Miami, FL, USA, December 2012.}, publisher = {IEEE}, pages = {392 -- 397}, year = {2012}, abstract = {A growing number of universities and other educational institutions provide recordings of lectures and seminars as an additional resource to the students. In contrast to educational films that are scripted, directed and often shot by film professionals, these plain recordings are typically not post-processed in an editorial sense. 
Thus, the videos often contain longer periods of inactivity or silence, unnecessary repetitions, or corrections of prior mistakes. This paper describes the FAU Video Lecture Browser system, a web-based platform for the interactive assessment of video lectures, that helps to close the gap between a plain recording and a useful e-learning resource by displaying automatically extracted and ranked key phrases on an augmented time line based on stream graphs. In a pilot study, users of the interface were able to complete a topic localization task about 29 \% faster than users provided with the video only while achieving about the same accuracy. The user interactions can be logged on the server to collect data to evaluate the quality of the phrases and rankings, and to train systems that produce customized phrase rankings.}, language = {en} } @inproceedings{HoenigBockletRiedhammeretal.2012, author = {H{\"o}nig, Florian and Bocklet, Tobias and Riedhammer, Korbinian and Batliner, Anton and N{\"o}th, Elmar}, title = {The Automatic Assessment of Non-native Prosody: Combining Classical Prosodic Analysis with Acoustic Modelling}, series = {INTERSPEECH 2012, 13th Annual Conference of the International Speech Communication Association (ISCA), Portland, OR, USA, September 2012.}, booktitle = {INTERSPEECH 2012, 13th Annual Conference of the International Speech Communication Association (ISCA), Portland, OR, USA, September 2012.}, year = {2012}, abstract = {In earlier studies, we assessed the degree of non-nativeness employing prosodic information. In this paper, we combine prosodic information with (1) features derived from a Gaussian Mixture Model used as Universal Background Model (GMM-UBM), a powerful approach used in speaker identification, and (2) openSMILE, a standard open-source toolkit for extracting acoustic features. We evaluate our approach with English speech from 94 non-native speakers. GMM-UBM or openSMILE modelling alone yields lower performance than our prosodic feature vector; however, adding information from the GMM-UBM modelling or openSMILE by late fusion improves results.}, language = {en} } @inproceedings{RiedhammerRingNoethetal.2012, author = {Riedhammer, Korbinian and Ring, Martin and N{\"o}th, Elmar and Kolb, Daniel}, title = {A Software Kit for Automatic Voice Descrambling}, series = {2012 IEEE International Conference on Communications (ICC); IEEE International Workshop on Security and Forensics in Communication Systems (SFCS).}, booktitle = {2012 IEEE International Conference on Communications (ICC); IEEE International Workshop on Security and Forensics in Communication Systems (SFCS).}, pages = {8349 -- 8353}, year = {2012}, abstract = {Voice scrambling is widely used to add privacy to the radio communication of various authorities - but is also used by criminals to evade prosecution. In this article, we consider various analog voice scrambling techniques such as fixed frequency inversion, splitband inversion and rolling code scramblers. We explain how to break them using automatically extracted measures and scoring algorithms, and evaluate the proposed system using simulated data. While the simple inversion can be easily broken, the more advanced techniques require additional work prior to unsupervised automatization; the presented user interface allows the user to refine the automatic results to obtain a high quality solution.}, language = {en} }