@phdthesis{Alsabbagh2023, author = {Alsabbagh, Wael}, title = {Investigating security issues in programmable logic controllers and related protocols}, doi = {10.26127/BTUOpen-6611}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-66114}, school = {BTU Cottbus - Senftenberg}, year = {2023}, abstract = {Programmable Logic Controllers (PLCs) are pivotal in Critical Infrastructures (CIs) and Industrial Control Systems (ICSs), governing processes in nuclear power plants, petrochemical factories, and water treatment systems. Despite their importance, PLCs are vulnerable to security threats, notably control logic injection attacks, aiming to sabotage physical processes. This thesis delves into PLC security, analyzing vulnerabilities in non-cryptographically and cryptographically protected PLCs, particularly Siemens S7-300 and S7-1500 models. Siemens, an automation market leader, utilizes S7-300 PLCs in millions of applications, reflecting the broader ICS security landscape. The S7-1500 line claims resistance to cyberattacks, including control logic injections. The thesis evaluates authentication in non-cryptographically protected PLCs, introducing a stealthy control logic injection attack scenario using an S7-300 PLC and S7Comm protocol. The second part explores integrity checks in cryptographically protected S7-1500 PLCs. Findings, encompassing disclosed vulnerabilities, lead to a severe control logic injection attack with a malicious interrupt block, conducted in an industrial setting using the S7-1500 and S7CommPlus protocol. The final segment focuses on Profinet protocol security and an injection attack scenario. The study demonstrates adversaries manipulating critical data without prior knowledge, causing harm to physical processes. A real-world attack on a Profinet-based system with two S7-300 PLCs is executed. The thesis concludes by proposing mitigation solutions, enhancing PLC and communication protocol security. This contribution elevates the security posture of millions of operating devices globally, advancing PLC security research.}, subject = {Programmable logic controllers; Control logic injection; Cyberattacks; Cybersecurity; False data injection; Speicherprogrammierbare Steuerungen; Control Logic Injection-Angriffe; Cyberangriffe; Cybersicherheit; False Data Injection-Angriffe; Kritische Infrastruktur; Kontrollsystem; Speicherprogrammierte Steuerung; SIMATIC S7-300; SIMATIC S7-1500; Computersicherheit; Cyberattacke}, language = {en} } @phdthesis{Dyka2013, author = {Dyka, Zoya}, title = {Analyse und Vorhersage des Fl{\"a}chen- und Energieverbrauches optimaler Hardware Polynom-Multiplizierer f{\"u}r GF(2ⁿ) f{\"u}r elliptische Kurven Kryptographie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-27240}, school = {BTU Cottbus - Senftenberg}, year = {2013}, abstract = {The use of asymmetric cryptosystems, e.g. elliptic curve cryptography (ECC), requires large computing capacity that is normally not available on mobile devices or wireless sensor nodes. Implementing ECC in hardware reduces the required time and energy. Optimizing the hardware implementations not only further reduces the time and energy consumption but also helps to lower the manufacturing costs, so that such solutions become usable even for low-cost devices.
Within this dissertation, optimization options for the multiplication of the polynomials used in EC operations were investigated. The goal of the optimizations was that the multiplication can be performed with a minimal number of additions (i.e. XOR gates) and multiplications (i.e. AND gates). In this work, the iterative processing of 10 multiplication methods (MMs) was investigated, in contrast to the usual recursive processing. For each of the investigated MMs, an order of operations was determined that leads to a reduced number of XOR operations. Using the optimized order can substantially reduce the complexity of the MMs. For example, for the generalized Karatsuba MM [18] the reduction of the XOR effort is 39 \% on average for polynomial lengths up to 600 bits. For the IHP 0.13 μm technology, this reduction of the XOR effort corresponds to an average area reduction of the polynomial multipliers of 35 \%. For the 4-segment Karatsuba MM, not only the XOR effort but also the signal delay is reduced in comparison to the recursive application of the original Karatsuba MM. In addition, an algorithm for determining an area- and/or energy-optimal combination of the multiplication methods was developed. Using the proposed algorithm, the area-optimal and the energy-optimal combinations of the MMs were determined for polynomial lengths up to 600 bits. All ECC-relevant polynomial lengths lie within this range. The average area reduction in comparison to the data reconstructed from [30] is 12 \%. Additionally, an energy-optimal serial multi-cycle multiplier for 233-bit polynomials based on Karatsuba-like multiplication methods was developed. This multiplier uses the Winograd MM and is based on an area-optimized 78-bit partial multiplier. The theoretical results were successfully verified using synthesis data for the IHP technology. The energy consumption and the execution time of the design are 24 \% and 28 \% smaller, respectively, than those of the reference design from [28].}, subject = {Hardwareentwurf; Kryptologie; Elliptische Kurven Kryptographie; GF(2ⁿ); Polynom-Multiplikation; Optimierung; Hardware Implementierung; Elliptic curve cryptography; GF(2ⁿ); Polynomial multiplication; Optimization; Hardware implementation}, language = {de} } @phdthesis{Brzozowski2012, author = {Brzozowski, Marcin}, title = {Energy-efficient means to support short end-to-end delays in wireless sensor networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-25299}, school = {BTU Cottbus - Senftenberg}, year = {2012}, abstract = {This work addresses tough challenges of sensor network applications with Quality of Service requirements. That is, nodes must work with batteries for a long time, support short end-to-end delays and robust communication in multi-hop networks. It starts with presenting previous research efforts that address such challenges. For instance, many Medium Access Control (MAC) protocols keep nodes mostly sleeping to save energy and synchronize wake-up times for communication. Although such protocols offer short end-to-end delays, they still suffer from long idle listening and shortened lifetimes. The main reasons are the long time needed to detect an idle channel and inefficient ways of dealing with clock drift.
This work introduces novel solutions to these problems, mainly at Layer 2 of the OSI model, that significantly reduce idle listening. First, nodes predict future drift and reduce the time needed to compensate clock uncertainty among neighbors. Second, they quickly detect an idle channel and power down the transceiver. In some scenarios, nodes work 30\% longer owing to these solutions. To tackle problems with unreliable wireless links, sensor nodes may apply various solutions at Layer 2. For example, with the Automatic Repeat reQuest (ARQ) protocol they send retries on frame losses, resulting in extra energy consumption. This work examines the impact of ARQ on the lifetime and on the reception rate. Several indoor and outdoor experiments showed that with only 1-2 retries nodes can handle many communication problems. Besides, owing to the idle-listening reduction mentioned previously, ARQ shortens the lifetime by only 10\%. Although this work addresses particular applications, the solutions presented here can be used in other scenarios and with different protocols. For instance, the energy-efficient drift compensation approach can be directly used in any schedule-based MAC protocol, like the one based on the IEEE 802.15.4 standard. Besides, any protocol can benefit from the solution to the idle-listening reduction based on the early detection of an idle channel. Finally, owing to the analytical model that estimates the lifetime of nodes, researchers and developers can evaluate MAC protocols running on various hardware platforms at an early stage.}, subject = {Rechnernetz; Drahtloses Sensorsystem; Verteiltes System; Kommunikationsprotokoll; Drahtlose Kommunikation; Sensornetze; Kurze Latenzen; Uhrendrift; Medienzugriff; Wireless communication; Sensor networks; Short delay; Clock drift; MAC}, language = {en} } @misc{Vogel2018, type = {Master Thesis}, author = {Vogel, Elisabeth}, title = {Analyse von EM-Kartographie als Mittel zur Bestimmung von Leakage-Quellen sowie des Effektes geeigneter Gegenmaßnahmen}, doi = {10.26127/BTUOpen-6454}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-64545}, school = {BTU Cottbus - Senftenberg}, year = {2018}, abstract = {The Internet of Things (IoT) and Wireless Sensor Networks (WSNs) are essential for today's global information society, relying on small wireless devices for networking. When these devices' functionalities or data are manipulated, the potential damage is significant. Hence, securing data transmission between these devices using cryptography is crucial. However, the security of cryptographic algorithms depends on the secrecy of cryptographic keys, which can be vulnerable due to physical accessibility. Side-channel analysis attacks can exploit this vulnerability by using physical parameters associated with the operation of cryptographic chips, such as electromagnetic radiation during cryptographic operations. In response to this challenge, the Leakage Source Cartography Tool (LSC-Tool) was developed in this thesis to expedite the analysis of electromagnetic radiation in IHP's elliptic curve cryptography designs. The LSC-Tool enables automated evaluation of sets of electromagnetic traces, obtained from different measurement positions across a cryptographic chip. The analysis results in a leakage source map (LS-map) that displays the success of electromagnetic analysis attacks at each measurement point.
This tool offers a cost-effective and rapid means to assess the resistance of cryptographic designs against attacks, providing designers with insights into the most vulnerable areas of the chip and information about leakage per clock cycle. By applying the LSC-Tool, the resistance of two IHP ECC designs against horizontal differential electromagnetic analysis attacks was tested across 25 measurement positions. The statistical analysis of traces can be conducted using three methods: the least squares method, the difference-of-means-test, or the difference-of-the-mean method. The generated LS-maps show that using different methods yields distinct leakage source indications. Combining these maps enhances the attack's success rate. Notably, during this research, it became evident that the LSC-Tool could be adapted to create LS-maps for the functional blocks of ECC designs, enabling the analysis of simulated power traces for IHP ECC designs.}, subject = {EM-Kartographie; Elliptische Kurven; Power Analysis Angriffe; Seitenkanalattacke; Fehleranalyse; Kryptosystem; Elektromagnetische Strahlung; Elliptische Kurve; Side-channel-analysis; Countermeasures; Power analysis}, language = {de} } @phdthesis{Ilić2023, author = {Ilić, Aleksandar}, title = {Strategies for increasing maximum throughput and reducing latency in tree-based WSNs}, doi = {10.26127/BTUOpen-6498}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-64986}, school = {BTU Cottbus - Senftenberg}, year = {2023}, abstract = {This thesis deals with strategies for achieving high data throughput in wireless sensor networks that use a time division multiple access (TDMA) scheme to resolve medium access. The thesis uses a multi-sided approach that deals not only with the scheduling algorithm but also with the network layer and the interference model. The thesis proposes four solutions that significantly improve data throughput, fairness, and latency in the considered scenario. The thesis starts with an overview of state-of-the-art medium access control (MAC) protocols, emphasizing TDMA. Based on this overview, it is concluded that not much space for improvement is left in the field of schedule calculation algorithms; many such algorithms have been proposed to date, and they can achieve schedule lengths close to the theoretical minimum. However, the research also reveals a lack of in-detail evaluation and comparison of these algorithms; this makes it hard to choose the most suitable algorithm for a particular application and makes performance estimation inaccurate. Therefore, an extensive evaluation of state-of-the-art TDMA protocols using simulations and over 200 randomly generated networks was performed to tackle this issue. The results allow choosing an appropriate algorithm and estimating performance for each specific application. Next, the problem of multiple packet transmissions during a single time slot is analyzed. State-of-the-art TDMA protocols assume that one packet can be transmitted in each slot and optimize the number of slots each node gets under this assumption. However, when nodes can transmit more than one packet, the performance of such a schedule is impaired. To solve this, the M-TreeMAC protocol is proposed; this protocol considers the actual number of packets transmitted in a time slot and optimizes the schedule accordingly.
Furthermore, it is observed that the routing topology heavily impacts the schedule length created using this algorithm; an algorithm is therefore proposed that optimizes the topology to yield the shortest schedule when M-TreeMAC is used, increasing the benefits even further. Finally, the accuracy of the 2-hop interference model, commonly used by state-of-the-art TDMA scheduling algorithms, is studied and simulated using a realistic radio model based on measurement results. The results show high packet loss ratios for packets traveling a large number of hops to reach the sink. An adaptive interference model is proposed to improve the 2-hop interference model. The proposed model can increase throughput significantly in networks with a height of ten or more hops.}, subject = {WSN; TDMA; Interference; Throughput optimization; Drahtlose Sensornetzwerke; Interferenz; Maximierung des Datendurchsatzes; Zeitmultiplexverfahren; Drahtloses Sensorsystem; Funk{\"u}bertragung; TDMA; Energieeffizienz}, language = {en} } @misc{AlpirezBock2015, type = {Master Thesis}, author = {Alpirez Bock, Estuardo}, title = {SCA resistent implementation of the Montgomery kP-algorithm}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-36288}, school = {BTU Cottbus - Senftenberg}, year = {2015}, abstract = {Mathematically, cryptographic approaches are secure. This means that the time an attacker needs to find the secret by brute-forcing these approaches is about as long as the existence of our world. Practically, an algorithm implemented in hardware is a device that generates a lot of additional data during the calculation process. Its power consumption, electromagnetic radiation, etc. can be measured, saved and analysed for key extraction. Such attacks are called side channel analysis attacks and are significant threats when applying cryptographic algorithms. By considering these attacks when implementing a cryptographic algorithm, it is possible to design an implementation that is more resistant against them. The goal of this thesis was to design a methodology to securely implement the Montgomery kP-operation using an IHP implementation as a starting point. In addition, the area and energy consumption of the secure Montgomery kP-multiplier should still be highly efficient. The resistance against power analysis attacks of two different IHP ECC implementations was analysed in this thesis. A horizontal power analysis attack using the difference-of-means test was performed with the goal of finding potential leakage sources exploited in side channel analysis attacks, i.e. finding the reasons for a correct extraction of the cryptographic key. For both analysed ECC designs, four key candidates were extracted with a correctness of 90\% or more. Through analysis of the implemented Montgomery kP-algorithm's functionality and its power consumption, it was established that the algorithm's operation execution flow was the main cause of the implementations' vulnerability. Thus, a design methodology consisting of changing the Montgomery kP-algorithm operation flow was developed. As a result, the re-designed implementations do not deliver any correctly extracted key candidates whenever the difference-of-means test is performed on them. These re-designs implied an increase in chip area of about 5\% for each implementation. The execution time needed for performing a complete kP-operation was reduced for both designs.
Thereby, one implementation's execution time was reduced by 12\% in comparison to its original version, and even though its power consumption increased by 9\%, its energy consumption per kP-operation was reduced by 4.5\%.}, subject = {Side channel analysis; Elliptic curve cryptography; Power analysis; Difference-of-means test; Elliptic curve point multiplication; Elliptische Kurve; Kryptologie}, language = {en} } @phdthesis{SojkaPiotrowska2016, author = {Sojka-Piotrowska, Anna}, title = {On the applicability of short key asymmetric cryptography in low power wireless sensor networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-39453}, school = {BTU Cottbus - Senftenberg}, year = {2016}, abstract = {The growing popularity of Wireless Sensor Networks (WSN) makes the spectrum of their applications very wide. A great number of application areas, like health monitoring or military applications, require a high level of security and dependability from the wireless sensor network. Solving these issues can be supported by integrating cryptographic solutions into WSN applications. Since WSNs mainly consist of low-power devices, cryptographic solutions ideal for WSNs should provide computationally lightweight security mechanisms producing small data packets and ensuring confidentiality. Cryptographic mechanisms that have both these features are considered in this thesis, whose main objective is the analysis of the applicability of short key elliptic curve cryptography in WSN environments. Reduced key lengths require modification of the standard ECC security algorithms to provide authentication and also a novel solution for a cryptographically secure pseudo-random number generator. The proposed solution is based on the standard ECC, but it differs in several aspects. The main difference is that the parameters of the used elliptic curve have to be kept secret. This is due to the fact that solving the Discrete Logarithm Problem (DLP) for such short parameters can be done in a short time. Additionally, using shorter parameters for the underlying elliptic curves also excludes the use of standard hash functions, which mainly influences the mechanisms for generating the digital signature. Hash functions require large input values and produce relatively large output data that is inapplicable in the shortECC environment. Thus, within this thesis a modified version of the standard Elliptic Curve Digital Signature Algorithm is proposed, which does not require any hash function. The shortECC approach needs pseudo-random numbers in the encryption and the digital signature protocols, but since it operates on numbers that are significantly shorter than the ones used by other cryptographic approaches, pseudo-random number generators for standard approaches are not suitable for shortECC. Thus, a new pseudo-random number generator is proposed that does not involve any additional hardware besides the modules available on the used test platform and operates on 32-bit integers. The randomness of the numbers generated by the proposed algorithm and their applicability for cryptographic purposes were evaluated using the NIST test suites.
The shortECC approach was also subjected to cryptanalysis in order to prove its security and determine the circumstances and constraints for its application.}, subject = {ECC; Sensor Networks; PRNG; Lightweight cryptography; Elliptische Kurven; Drahtlose Sensornetze; Zufallszahl Generator; Drahtloses Sensorsystem; Zufallsgenerator}, language = {en} } @phdthesis{Stecklina2016, author = {Stecklina, Oliver}, title = {A secure isolation of software activities in Tiny Scale Systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-40362}, school = {BTU Cottbus - Senftenberg}, year = {2016}, abstract = {With the introduction of the Internet at the end of the last century, modern society changed fundamentally. Computer systems became an element of nearly all parts of our daily life. Due to the interconnection of these systems, local borders have mostly vanished, so that information is accessible and exchangeable anywhere and at any time. But this increased connectivity means that physical fences are no longer adequate protection for computer systems. Whereas the security of commodity computer systems has been improved continuously along with their increasing connectivity, deeply embedded systems were and still are mostly protected by physical fences. But the ubiquitous availability of embedded systems in personal and commercial environments makes these systems likewise accessible and moves them strongly into the focus of security investigations. Deeply embedded systems are usually equipped with tiny scale microcontrollers, which are limited in their available resources and do not feature secure mechanisms to isolate system resources. Hence, a single error in a local software component is not limited to the component itself; instead, the complete system may be affected. The lack of resource isolation makes tiny scale systems prone to accidental errors and, in particular, vulnerable to a broad variety of malicious software. For a safe and secure operation of computer systems it is strongly recommended that software components are isolated in such a manner that they have access only to those resources which are assigned to them. Even though a substantial number of approaches in the context of embedded systems' safety were investigated during the last fifteen years, security was mostly neglected. This thesis is focused on security aspects where malicious software wittingly tries to bypass available protection mechanisms. The thesis introduces a security platform for tiny scale systems that enforces an isolation of software components considering security aspects. Due to the limited resources of tiny scale systems the proposed solution is based on a co-design process that takes the static and predefined nature of deeply embedded systems into account and includes hardware, compile-time, and run-time partitions to reduce the number of additional run-time components, to avoid performance drawbacks, and to minimize the memory as well as the components' footprint overhead. To prove the applicability of the presented platform it was applied and evaluated with two real applications. In addition, an investigation of technologies of commodity computer systems that are suitable to build secure systems is presented. The thesis analyzes their enforcement based on the features provided by the introduced security platform.
The contributions of this thesis include the enforcement of a secure isolation of system resources on tiny scale systems and enable the development of a broad variety of secure tiny scale system applications.}, subject = {Security; Embedded systems; Memory protection; Hardware software co-design; Role-based access control; Eingebettete Systeme; Sicherheit; Speicherschutz; Rollen-basierte Zugriffskontrolle; Hardware-Software Co-Design; Eingebettetes System; Speicherzugriff; Zuverl{\"a}ssigkeit}, language = {en} } @misc{Kwarteng2025, type = {Master Thesis}, author = {Kwarteng, Ferdinand Prempeh}, title = {Security requirements and mechanisms for real-time serverless computing : a focus on Function-as-a-Service (FaaS)}, doi = {10.26127/BTUOpen-7164}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-71645}, school = {BTU Cottbus - Senftenberg}, year = {2025}, abstract = {Serverless computing, particularly its Function-as-a-Service (FaaS) offering, has emerged as a prominent cloud computing model empowering developers to craft event-driven applications by deploying custom application logic in an ephemeral runtime environment. This model abstracts away the underlying infrastructure complexities and management overhead, offering key benefits such as Pay-As-You-Go (PAYG) billing and automated scalability. Although Real-Time (RT) extensions to FaaS (RT-FaaS) have been proposed for realizing the functional requirements of industrial automation virtualization use cases, these architectures introduce distinct security challenges that generic security mechanisms cannot fully address. As a result, RT-FaaS platforms remain vulnerable to both internal and external threats and a variety of exploitation vectors that can compromise the Confidentiality, Integrity and Availability (CIA) security properties of such a platform. The aim of this thesis is to assess the core security requirements of an RT-FaaS platform by evaluating potential vulnerabilities and risks within this environment. We further propose specifically tailored mitigation mechanisms based on the extended Berkeley Packet Filter (eBPF) and cryptographic digital signatures for an RT-FaaS platform with a WebAssembly (WASM) runtime to enhance its resilience without imposing prohibitive performance overheads. Key areas of focus for this work include RT-FaaS runtime isolation, continuous security enforcement and data protection. We evaluated the proposed security mitigation mechanisms on the platform, accounting for both effectiveness and performance. By doing so, we discuss the trade-offs between the stringent security requirements and maintaining predictable, low-latency execution essential for industrial automation use cases. While we observed some considerable jitter and latency overheads on the secure RT-FaaS platform, these did not significantly impact the functional performance of the platform.
Hence, adopting serverless computing for RT industrial automation use cases is feasible while ensuring robust function, system and network security.}, subject = {Serverless computing; Real-time Function-as-a-Service; Industrial automation; Security mechanisms; WebAssembly runtime; Serverloses Computing; Echtzeit-Funktion-als-Dienstleistung; Industrielle Automatisierung; Sicherheitsmechanismen; WebAssembly-Laufzeitumgebung; Serverless Computing; Function as a Service; Sicherheit; Datenschutz}, language = {en} } @phdthesis{Maaser2010, author = {Maaser, Michael}, title = {Design and realization of privacy guaranteeing means for context-sensitive systems}, isbn = {978-3-8322-9448-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-19596}, school = {BTU Cottbus - Senftenberg}, year = {2010}, abstract = {Privacy issues are becoming more and more important, especially since the cyber and the real world are converging to a certain extent when using mobile devices. Means that really protect privacy are still missing. The problem is that, as soon as a user provides data to a service provider, the user loses control over her/his data. The simple solution is not to provide any data, but then many useful services, e.g., navigation applications, cannot be used. The dissertation addresses two aspects of privacy protection. The first aspect regards not producing private information if possible. Such unnecessary information includes traces of access-controlled service uses. Hence, one approach in this dissertation enables k-anonymous authorization for service uses. It equips the users of the system with trusted pseudonymous certificates reflecting their respective authorizations. Analogous to anonymous e-cash, the certificates are issued by a trusted authority with knowledge of the actual authorizations of an identified user. The certificates can be verified by any service supported by the trusted authority but without knowledge of the user's identity. Not even the issuing authority is able to reveal the user's identity from the pseudonym of a certificate. Hence, service usage cannot be tracked, neither by the service nor by the authority. This protects the privacy of the service usage behavior of users. The second aspect of privacy protection is to remain in control over private data released to others. Temporary release of private data is essential to context-sensitive services, which rely on these context data to provide or improve added value. Therefore, the dissertation designs a Privacy Guaranteeing Execution Container (PGEC), which enables applications to access private user data and guarantees that the user data is deleted as soon as the service or application is finished. Basically, the concept is that the application obtains access to the user data in a specially protected and certified environment, the PGEC. The PGEC also restricts the communication between the application and the service provider to what is explicitly allowed by the service user. In addition to those means, the PGEC also implements countermeasures against malicious attacks such as modified host systems and covert channel attacks, which might misuse CPU load to signal data out of the PGEC.
Thus, the PGEC guarantees a "one time use" of the provided private data.}, subject = {Digital Rights Management; Datenschutzfreundliche Techniken; Anonymit{\"a}t; DRM; Dateneinmalnutzung; Privacy enhancing techniques; Anonymity; DRM; One-time-use of data}, language = {en} } @phdthesis{Vater2017, author = {Vater, Frank}, title = {Secure Scan Chain and Debug Interface}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-42932}, school = {BTU Cottbus - Senftenberg}, year = {2017}, abstract = {Cryptographic operations are becoming more and more popular because unencrypted information is a security risk in many application areas. One possible way to get the secret key is the misuse of test and debug facilities. These interfaces allow read and write access to all internals of the ASIC. Typically, they are not protected against misuse by a third party. In this thesis a new countermeasure against side channel attacks on scan chain and debug interfaces is proposed. The countermeasures for the scan chain interface and for the debug interface use the same approach, but every interface has its own security component. The approach designed and investigated in this thesis is based on a key matching method, which is resistant against reverse engineering. To test and debug the device, the user has to write the secret key into an OTP. The "golden" key is embedded in specially designed units, which are used to compare the golden key with the one provided in the OTP. After testing or debugging, the key in the OTP is deleted. Even if this key is known to an attacker, it is not possible to rewrite the value into the OTP. The unit which contains the golden key and the compare logic is made of digital standard cells. The cells are not modified, but the wiring is novel. Small isolation elements from the analog circuit design are used to implement a "0" or a "1" as the value for the golden key. The security feature is the resistance against optical reverse engineering, because both variants of the golden key and compare unit have the same footprint. Finally, 128 of these units compose the 128-bit golden key. The scan chain solution is suitable for any IP core, independent of whether it is a standalone cryptographic component, a microcontroller or a very complex system on a chip. For the scan chain test, the test pattern generated by the scan pattern generator can be used without any modification. The only requirement is that before the test, the secret key has to be written into the device, and after the test, the secret key has to be deleted. For a debug interface, the same problem exists as for the scan chain interface. Access to the device is an open door for an attacker. As the debug interface is used in different development stages, the approach is to implement several OTP lines - for example, one per development stage. Additionally, a mechanism for different access levels is offered. Depending on the access level, different address spaces are unlocked. As shown in this thesis, the solutions for a secure scan and debug interface are easy to integrate into an existing design, while area, timing and power are not influenced significantly. The scan or debug process has to be changed only slightly, the test coverage is not affected, and defect analysis remains possible.
To summarize, in this thesis a novel and innovative approach to protect scan and debug interfaces against side channel attacks was designed and evaluated.}, subject = {Scan chain; Debug interface; Secure; Testschnittstelle; Debugschnittstelle; Sicher; Kryptoanalyse; Schnittstelle; Chiffrierung}, language = {en} } @phdthesis{Kabin2023, author = {Kabin, Ievgen}, title = {Horizontal address-bit SCA attacks against ECC and appropriate countermeasures}, doi = {10.26127/BTUOpen-6397}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-63973}, school = {BTU Cottbus - Senftenberg}, year = {2023}, abstract = {In this work we investigated the resistance of different kP implementations based on the Montgomery ladder against horizontal, i.e. single trace, attacks. Applying statistical methods for the analysis, we were able to reveal the secret value k completely. The reason for the success of our attacks is the key-dependent addressing of the registers and other design blocks, which is an inherent feature of binary kP algorithms. This dependency was successfully exploited in the past by Itoh et al. analyzing many hundreds of kP traces, i.e. this attack is a vertical address-bit differential power analysis attack against the Montgomery ladder. The vulnerability of the Montgomery ladder against horizontal address-bit attacks was detected and demonstrated during our investigations. We were able to reveal the scalar k by exploiting the address-bit vulnerability in single trace attacks using not only statistical methods, but also the Fourier transform, selected clustering methods, as well as one of the simplest methods - the automated simple SCA. We performed successful horizontal address-bit SCA attacks against both types of ECs, i.e. against the highly regular Montgomery ladder and against a binary kP algorithm implementing atomic patterns. The success of our attacks shows that the regularity and atomicity principles are not effective against horizontal address-bit attacks. As a means for reducing the attack success, we investigated the hiding ability of the field multiplier, which is usually the largest block of kP designs. We implemented our field multiplier for ECs over prime fields corresponding to the 4-segment Karatsuba multiplication formula that reduces the execution time and the energy consumption for a kP operation by about 40 \% in comparison to multipliers exploiting the classical multiplication formula. However, the energy consumption per clock cycle of our multiplier remained without significant changes, i.e. the protective hiding properties of the multiplier as a noise source were not decreased. Another advantage of our field multiplier is its inherent resistance to horizontal collision attacks, in contrast to multipliers based on the classic multiplication formula. Additionally, we proposed regular scheduling for the block addressing as an effective strategy for reducing the success of horizontal address-bit attacks. Combining this approach with the hiding features of the field multipliers can increase the resistance of the kP designs for both types of ECs against a broad spectrum of SCA attacks.
The mentioned analysis methods can be successfully applied for determining SCA leakage sources in the early design phase.}, subject = {Elliptic curve cryptography; Hardware implementation; Side-channel analysis; Horizontal attacks; Countermeasures; Elliptische Kurven-Kryptographie; Hardware Implementierung; Seitenkanalanalyse; Horizontale Angriffe; Gegenmaßnahmen; Seitenkanalattacke; Fehleranalyse; Differentielle Kryptoanalyse; Kryptosystem; Implementierung}, language = {en} } @misc{Sobhani2024, type = {Master Thesis}, author = {Sobhani, Mona}, title = {Investigating the potential of a hybrid quantum-classical approach for prime factorization}, doi = {10.26127/BTUOpen-6814}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-68144}, school = {BTU Cottbus - Senftenberg}, year = {2024}, abstract = {The RSA algorithm is a fundamental component of secure communication, relying on the computational complexity of factoring large integers into primes. However, Shor's algorithm shows that quantum computers could theoretically find these prime factors with exponential speedup, even for large integers. Despite this potential, current quantum computers are not yet capable of breaking RSA. Today's Noisy Intermediate-Scale Quantum (NISQ) devices, which are accessible for experimentation through IBM's cloud-based platform, enable researchers to test algorithms and investigate the potential quantum advantage given the existing capabilities of quantum computers. This thesis explores a hybrid quantum-classical approach that uses quantum computing for the computationally expensive parts of the problem together with a classical optimizer. This study employed IBM's quantum simulator and a real quantum computer to evaluate the effectiveness of the approach. The quantum simulator successfully factorized the number 1048561, while the real quantum computer factorized the number 253.}, subject = {Quantum computing; Hybrid quantum-classical approach; Prime factorization; RSA; Variational Quantum Eigensolver; Quantencomputing; Primfaktorisierung; Hybrider quantum-klassischer Ansatz; RSA-Verschl{\"u}sselung; Primzahlzerlegung; Quantencomputer; Simulation}, language = {en} } @phdthesis{Ortmann2010, author = {Ortmann, Steffen}, title = {Definition and configuration of reliable event detection for application in wireless sensor networks}, isbn = {978-3-8322-9445-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-19602}, school = {BTU Cottbus - Senftenberg}, year = {2010}, abstract = {Ubiquitous systems based on wireless sensor networks will greatly increase our quality of life. These systems are to be deployed in large areas with high density where hundreds or thousands of nodes are used. Certainly, that demands the use of low-cost devices with limited resources, which in turn are prone to faulty behaviour. This work introduces a novel concept for wireless sensor network configuration considering fault tolerance, energy efficiency and convenience as primary goals while being tailored to user needs. It allows low-level details like node resources, network structures, node availability etc. to be ignored and enables the programmer to work on a high abstraction level, namely the event itself, including event-related constraints. The definition of events characterising real world phenomena is of prominent use in sensor networks. The presented concept autonomously configures and monitors events, even if this requires organising collaboration between nodes to deliver the results. The contribution of this work is threefold.
An intuitive XML-based ESL simplifies event configuration to a level that is even suitable for non-professionals. It features hardware-independent description elements to define complex phenomena and enhances these with tailor-made voting schemes and application constraints. Based on that, a novel, fully decentralised mechanism to autonomously set up distributed event detection, called EDT, and a cost-efficient means to maintain such an EDT are presented. EDTs can be efficiently constructed on every device by using a tiny generating finite state machine requiring only eight states. It enables every node to self-divide event queries according to its own resources and self-adapt to the tasks assigned. Simultaneously, the EDT provides the interface for efficient collaboration using a lease-based publish/subscribe approach. The simulations clearly show that this concept works well and that the applied collaboration scheme outperforms even idealised acknowledgement-based approaches. On top of the EDT, a means is developed that enhances the reliability of detection beyond the scope of Boolean event decision. It examines behavioural trends in sensor readings to indicate the significance of actual measurements in relation to the configured event. Measured data is investigated in detail to finally attach a significance indicator "is" to each event. This automatically generated indicator shall support users or overlaying systems in decision-making. In the example scenario based on data of real test cases, the "is" indicates a flaming fire 88 seconds and a smouldering fire 48 seconds before the threshold-based method triggers the alarm.}, subject = {Drahtloses Sensorsystem; Zuverl{\"a}ssigkeit; Zuverl{\"a}ssigkeit; Ereigniserkennung; Verbraucherfreundlichkeit; Drahtlose Sensornetze; Reliability; Event detection; Usability; Wireless Sensor Networks}, language = {en} } @phdthesis{Wittke2021, author = {Wittke, Christian}, title = {Untersuchung der Auswirkung von Layout Varianten von Beschleunigern f{\"u}r kryptographische Operationen auf Seitenkan{\"a}le}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-57171}, school = {BTU Cottbus - Senftenberg}, year = {2021}, abstract = {The increasing digitalization of our society requires continuous protection of critical information by means of cryptographic systems. Algorithmically, these systems are hardly attackable. However, so-called side-channel attacks enable the extraction of secret data. One kind of side-channel attack with great attack potential is localized electromagnetic analysis (EMA). In this technique, measuring and analyzing the electromagnetic emanation of parts of the circuit allows conclusions to be drawn about internal states and thereby secret information, such as private keys, to be extracted. The current state of research in the field of localized EMA shows both successful attacks and countermeasures. However, the published works do not provide detailed investigations of the concrete layout of the FPGA implementations, which is the cause of the localized leakage. Instead, the causes are considered on the algorithmic level of the cryptographic method used, and an algorithmic countermeasure is proposed as well. This work investigates the influence of the layout of FPGA and ASIC implementations on localized EMA attacks.
In particular, the placement of the registers plays an important role here. The investigations in this work can be used to plan further measures for protecting cryptographic implementations. The results show that both the design itself and its implementation as an FPGA or ASIC have a great influence on how easily the implementation can be attacked by a localized EMA. Thus, the FPGA implementation of the considered ECC design can also be vulnerable to this kind of attack. Due to its implementation, this ECC accelerator is overall less susceptible than a reference design from the state of the art. Nevertheless, potential weak points could be identified, which form the basis for planning and implementing countermeasures. For an FPGA implementation of the ECC accelerator, the placement of the flip-flops in the layout is the main cause of the susceptibility. This can be counteracted by an even distribution of the registers together with permanently active logic, since the EM emanation of the logic masks that of the registers. In contrast, for the ASIC implementation no statement could be made about the influence of placement variants, since the influences were not measurable. However, the wiring on the top metal layer could be identified as a leakage source. As a countermeasure, it is proposed to use constraints so that the output signals of the registers of a cryptographic chip are not routed up to the top metal layer.}, subject = {Seitenkanalanalyse; ECC; EMA; Layout; Elektromagnetische Analyse; Elliptic Curve Cryptography; Localized EMA; Decapsulation; FPGA; ASIC; Seitenkanalattacke; Field programmable gate array; Layout}, language = {de} } @phdthesis{Peter2011, author = {Peter, Steffen}, title = {Tool-supported development of secure wireless sensor networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-24671}, school = {BTU Cottbus - Senftenberg}, year = {2011}, abstract = {The development of secure systems is already a very challenging task. In the domain of wireless sensor networks, this challenge is further aggravated by the severe constraints of the sensor node devices and the exposed character of the networks. To cope with this issue, this thesis proposes a tool-supported development flow named configKIT that helps users to integrate secured applications in the domain of Wireless Sensor Networks. It is a component-based framework that selects and composes configurations of hardware and software components for WSN applications from high-level user requirements automatically. To this end, the composition process utilizes a flexible meta-model to describe properties of the components, the requirements, and the system semantics, which allows the assessment of the behavior of the composed system. Based on this modeling technology, five practical security models are investigated, which are based on different technical views on a general security ontology for WSNs. Each model is discussed theoretically and practically, based on a practical integration in the configKIT framework. The configuration toolkit and the security models are finally evaluated by applying the techniques developed to the non-trivial example of secure in-network aggregation. The evaluation shows that all five practical security models developed in this thesis work correctly and with reasonable model overhead.
These results promote the notion of a practically applicable toolkit to configure secure applications in WSNs.}, subject = {Drahtloses Sensorsystem; Drahtlose Sensor Netzwerke; Sicherheit; Konfiguration; Wireless Sensor Networks; Security; Configuration}, language = {en} } @phdthesis{Piotrowski2011, author = {Piotrowski, Krzysztof}, title = {Assessment of the feasibility of distributed shared memory and data consistency for wireless sensor networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-23718}, school = {BTU Cottbus - Senftenberg}, year = {2011}, abstract = {Wireless sensor networks (WSNs) are built of cheap, resource-constrained devices capable of collecting, processing and communicating data. WSN applications depend on the data they collect. In other words, the applications require the data to be available, even if some WSN nodes fail. The challenge is that nodes are prone to failure and today's WSNs do not provide highly reliable data storage. Thus, the quality of the service provided by the system, regarding the data handling, is one of the most important factors. Data replication increases the availability of the data and, thus, the robustness and quality of the data storage. But the existence of several copies of data items in the WSN makes data consistency highly important in order to ensure proper behavior of the application. This work investigates the feasibility of data consistency models used in distributed shared memory in WSNs to provide more powerful distributed systems with reliable data exchange. As a starting point, WSNs and consistency approaches are introduced. Based on those basics, the mechanisms needed to allow for data consistency are discussed as a theoretical framework for the prototypical implementation of a middleware providing data consistency, which was implemented as part of this work. The middleware adapts the mechanisms known from original memory consistency approaches to be usable in the sensor network area and proposes its own low-cost mechanisms as well. The latter are at least partially based on the idea that within the shared memory of WSNs information is the major concern and that, therefore, the replica update rates can be tailored to the application. In order to allow for ease of use of the middleware, the replication schemes and consistency mechanisms can be defined by the application engineer as a policy. The latter is transformed and injected into the middleware code by a pre-compiler, so that the application engineer no longer needs to implement replication and consistency mechanisms herself. The most appropriate memory consistency models are implemented and evaluated using the framework proposed in this thesis.}, subject = {Verteiltes System; Drahtloses Sensorsystem; Verteilter gemeinsamer Speicher; Drahtlose Sensornetze; Datenkonsistenz; Distributed shared memory; Wireless sensor networks; Data consistency}, language = {en} } @phdthesis{Lehniger2025, author = {Lehniger, Kai}, title = {Stack buffer overflow attacks and countermeasures with register windows on the example of Xtensa LX}, doi = {10.26127/BTUOpen-7226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus4-72262}, school = {BTU Cottbus - Senftenberg}, year = {2025}, abstract = {Stack buffer overflows are vulnerabilities that occur due to missing bounds checks when writing data into local buffers.
These vulnerabilities have been exploited for decades and persist to this day due to the use of popular programming languages like C and C++, which allow arrays to be accessed without any checks. Over the years, a wide variety of countermeasures has been introduced, ranging from static analysis to various forms of runtime checks. Register windows are a hardware architecture feature that was developed for Reduced Instruction Set Computer (RISC) architectures to reduce the overhead of function calls and returns caused by the necessary load and store instructions. With register windows, a processor is equipped with a large number of registers, but only a part, the register window, is visible to the program. With a function call, the register window moves to a new set of registers for the function to use. When the function returns, the register window moves back. The physical register set is used like a ring buffer, and if it overflows, an exception interrupts the program execution to save the content of the overflowed registers to the stack. Underflow exceptions, on the other hand, are responsible for restoring the content when the control flow returns to a function that had its registers overflowed. Since return addresses, stack pointers, and function arguments, which are often targeted by stack buffer overflow attacks, are held in registers, the register window mechanism has a direct influence on these attacks and their countermeasures. This thesis investigates these influences using the Xtensa LX architecture as an example, as it is used in the popular ESP32 microcontroller. In the first part, window overflows and underflows are used to craft stack buffer overflow based attacks, with a focus on Return-Oriented Programming (ROP). The second part investigates selected countermeasures, including canaries, XOR cookies, shadow stacks, and encryption. Here, the focus is on how these countermeasures can or need to be adjusted in order to be used with register windows, and how register windows can be used to improve performance or security. A subsequent chapter addresses specific aspects of the implementation of said countermeasures. All countermeasure implementations have been evaluated using the Bristol/Embecosm Embedded Benchmark Suite (BEEBS), a collection of 80 benchmarks for embedded devices, to show their performance for a wide variety of use cases. The presented results show the potential of register windows to implement very performant countermeasures against stack buffer overflow attacks.}, subject = {Buffer overflow; IT-security; Xtensa; Register window; Puffer{\"u}berl{\"a}ufe; Registerfenster; IT-Sicherheit; Gegenmaßnahmen; Angriffe; Puffer{\"u}berlauf; Computersicherheit; Gegenmaßnahme}, language = {en} }