@phdthesis{Neiling2004, author = {Neiling, Mattis}, title = {Identifizierung von Realwelt-Objekten in multiplen Datenbanken}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000437}, school = {BTU Cottbus - Senftenberg}, year = {2004}, abstract = {Data on real-world objects may be stored in several databases without a global and consistent identifier existing. How can one determine which of the data refer to the same real-world objects? The general model for object identification presented here consists of the steps conversion, comparison, and classification. It further comprises: (1) identification concepts, (2) the software architecture, (3) characteristics of data quality, (4) a preselection method that ensures efficiency for large databases (using index structures), and (5) a specification for evaluating such methods, including sampling and quality criteria. We evaluated several methods on housing, address, and library data. The main results are that scalability is determined solely by the preselection method used and its implementation, and that the decision-tree method achieved higher correctness and was more robust than record linkage.}, subject = {Multidatenbanksystem; Objektorientiertes Datenbanksystem; Datensatz; Automatische Identifikation; Data integration; Instance integration; Record linkage; Merge/Purge; De-duplication}, language = {de} } @phdthesis{Jurk2005, author = {Jurk, Steffen}, title = {A Simultaneous Execution Scheme for Database Caching}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-228}, school = {BTU Cottbus - Senftenberg}, year = {2005}, abstract = {Database caching techniques promise to improve the performance and scalability of client-server database applications. The task of a cache is to accept requests from clients and to compute them locally on behalf of the server. The content of a cache is filled dynamically based on the application and the users' data domain of interest. If data is missing or concurrent access has to be controlled, the computation of the request is completed at the central server. As a result, applications benefit from the quick responses of a cache, and load is taken off the server. The dynamic nature of a cache, the need for transactional consistency, and the complex nature of a request make database caching a challenging field of research. This thesis presents a novel approach to the shared and parallel execution of stored procedure code between a cache and the server. Every commercial database product provides such stored procedures, which are coded in a full programming language. Given a request in the form of such a procedure, we introduce the concept of split twin transactions, which logically split the procedure code into two parts, say A and B, such that A is executed at the cache and B at the server in a simultaneous and parallel manner. Furthermore, we analyse the procedure code to detect suitable parts. To the best of our knowledge, this has not yet been addressed by any existing approach. Within a detailed case study, we show that our novel scheme improves the performance of existing caching approaches. Furthermore, we demonstrate that different load conditions of the system require different sizes of the parts A and B to gain maximal performance.
As a result, we extend database caching by a new dimension of optimization, namely the splitting of the procedure code into A and B. To solve this problem of dynamically balancing the code execution between cache and server, we define the maximum performance of a database cache over time and propose a stochastic model that captures the average execution time of a procedure. Based on the execution frequencies of primitive database operations, the model allows us to partially predict the response times for different sizes of A and B, hence providing a partial solution to the optimization problem.}, subject = {Datenbankentwurf; Zugriff; Simultaneous Engineering; Datenbank-Cache-Technik; Datenbankoptimierung; Zwillingstransaktion; Database caching; Database optimization; Twin transactions; Simultaneous execution; Stored procedures}, language = {en} } @phdthesis{Steeg2000, author = {Steeg, Martin}, title = {RADD, raddstar - a rule based database schema ; compiler, evaluator, and optimizer}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000118}, school = {BTU Cottbus - Senftenberg}, year = {2000}, abstract = {The thesis discusses the problems of database development and maintenance and presents an approach to conceptual tuning realized by conceptual design using the HERM/RADD notation. The RADD design tool was designed to develop HERM specifications graphically. RADD adds semantics and operations to the design that are not directly annotated on the graphical specification, such as "afunctional" dependencies and SQL operations and procedures. The RADD/raddstar system extends the graphical specification of the database schema with the possibility to specify operations, and with invocations for transforming the schema, for evaluating transactions, and for optimizing the schema, each according to the implicit requirements modelled graphically and the explicit requirements specified by means of the conceptual specification language (CSL). CSL serves as the command-line interface of RADD/raddstar. The system compiles the graphical RADD schema as well as the CSL specifications into terms of the RADD* data model, and these terms are used for further evaluation. The actions performed by RADD/raddstar (schema transformation, transaction and cost evaluation, schema optimization) are based on rules that the user can develop and modify using CSL.}, subject = {Datenbankentwurf; Schemaevolution; Entity-Relationship-Datenmodell; Konzeptionelle Modellierung; Typinferenz}, language = {en} } @phdthesis{Srinivasa2001, author = {Srinivasa, Srinath}, title = {An algebra of fixpoints for characterizing interactive behavior of information systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000143}, school = {BTU Cottbus - Senftenberg}, year = {2001}, abstract = {The dynamics of an information system (IS) is characterized not only by its computational behavior but also by its interactive behavior. Interactive dynamics forms an integral part of most information systems. Despite this, the interactive nature of an IS is still poorly understood. Interaction affects the expressiveness of an IS at such a fundamental level that Wegner [Weg97, Weg99a] contended that interactive behavior cannot be modeled by Turing Machines (TMs). A TM is considered the foundational model of computation. It models computable functions that map between problem and solution domains. However, a TM models only non-interactive mappings.
A mapping between a problem and a solution domain that is interactive in nature can change its direction of computation as a result of intermediate interactions. Based on this contention, Wegner proposes interaction (rather than computation) as the fundamental framework for IS modeling [Weg99]. In this thesis, we address Wegner's contention and the nature of interactive dynamics. An information system is modeled as a collection of semantic processes or Problem Solving Processes (PSPs). If these PSPs are interactive, they are called open systems; if they are non-interactive, the IS is called a closed system. Intuitively, open system dynamics are known to be richer than closed system dynamics. We make this distinction precise in this thesis. Interaction is shown to be made up of three properties: computation, persistence of state across computations, and channel sensitivity. Persistence of state and channel sensitivity each contribute richer behavioral semantics than computation alone. This is shown by introducing a concept called the solution space of a semantic process. A solution space is the abstract domain characterized by the process dynamics. Interactive solution spaces are found to be richer than algorithmic solution spaces, and they require at least a three-valued system of logic for their characterization. The earlier question of interactive behavior as applied to IS design is then revisited. The interactive dynamics of an IS characterize the IS functionality. We call the solution space of interactive IS behavior its interaction space. The interaction space of an IS is contrasted with its object space, which is concerned with the IS structure and state maintenance dynamics. The interaction space has a degree of autonomy with respect to the object space. This aspect is often not acknowledged in IS design, resulting in the intermixing of structural and functionality concerns. Separating these concerns can avoid certain conflicting problems in IS design as well as provide better maintainability. We call this the "dual" nature of open systems. Based on this insight, we propose an IS design paradigm called dualism, where an IS model is made up of an object schema, characterizing the IS structure, and an interaction schema, characterizing the IS functionality. The interaction schema is characterized by a three-valued system of logic, representing a set of obligated (or liveness) behavior, permitted (or possible) behavior, and forbidden behavior. To be termed functional, the system must perform the obligated behavior; it may perform any of the permitted behavior, and it must not perform the forbidden behavior. An analysis of the dynamics of any real-world system can make these three-valued characteristics apparent.
Domain theory is used to propose the solution space concept, and deontic logic is used to represent the three modalities of interactive IS behavior.}, subject = {Dialogsystem; Informationssystem; Semantischer Bereich; Fixpunkt; Bereichstheorie; Deontische Logik; Interactive behavior of information systems; Turing machine; Persistence of state; Object schema; Interaction schema}, language = {en} } @phdthesis{Caumanns2000, author = {Caumanns, J{\"o}rg}, title = {Automatisierte Komposition von wissensvermittelnden Dokumenten f{\"u}r das World Wide Web}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000087}, school = {BTU Cottbus - Senftenberg}, year = {2000}, abstract = {This thesis describes the design and implementation of a system for the automated composition of knowledge-conveying documents for the World Wide Web. The basic idea of the underlying approach is the dynamic generation of documents from existing fragments such as texts, graphics, video sequences, etc. These basic building blocks are called media objects. In a first step, media objects are related to one another via a so-called dependency graph. Subsequently, the media objects best suited to the given user requirements (learning goals, prior knowledge, document structure, etc.) are selected and structured. By grouping neighbouring objects, pages and chapters emerge, which can be made accessible to the user as a hierarchy of HTML files. The only two tasks the author of a document has to carry out manually are the description of media objects with meta-information and the abstract specification of the document to be generated via predefined usage scenarios. The method developed in this dissertation is called bottom-up generation because, starting from existing media objects, first a media-object structure, then a page structure, and only finally the logical chapter structure is computed. The widespread top-down methods, in contrast, proceed the other way around: based on a fixed structure (e.g., a conceptual or a semantic net), a chapter structure is determined first, and only afterwards are the matching media objects retrieved and assembled. The bottom-up approach offers a number of advantages: * The generated documents are very easy to maintain and extend, since no static dependency structures have to be maintained. * Adaptability and adaptivity are supported implicitly. * The cost of creating teaching and information systems can be reduced considerably, since existing media can be reused. The greatest advantage of the bottom-up approach described in this thesis, however, is that it is an extensible model with which the author's manual effort can be successively reduced by employing current and future techniques from information retrieval, artificial intelligence, data mining, and information integration.
The long-term goal is to one day arrive at fully automatic document generation that is able to answer a user's queries with structured, adapted, and above all highly up-to-date documents.}, subject = {World Wide Web; Informationssystem; Elektronische Publikation; Generierung; Autorensystem}, language = {de} } @phdthesis{Rieckmann2001, author = {Rieckmann, J{\"o}rg}, title = {Entwicklung einer Reportingplattform als entscheidungsunterst{\"u}tzendes System}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000074}, school = {BTU Cottbus - Senftenberg}, year = {2001}, abstract = {The increasing globalization of markets and the associated rise in competitive pressure place ever higher demands on the participating companies and their decision makers. With the introduction of flat hierarchies, more and more responsibility is delegated to the individual employee. Particularly at the operational level, this leads to a disproportionate increase in the complexity of individual task areas. This dissertation describes the development of a system that supports end users in complex decision situations. From the respective application domain, variants considered stable (processes, SQL statements, and programs) are modelled as components in the sense of componentware and made available to the user together with robust composition mechanisms. From these, users can assemble (new) solution strategies and work through them under system-controlled user guidance. Flexible exception handling is ensured by the componentware concepts and by the user's ability to combine and adapt components at runtime.}, subject = {Unternehmen; Prozessmanagement; Entscheidungsunterst{\"u}tzungssystem; Komponente; Entwurfsmuster; Template; Entscheidungsunterst{\"u}tzung; Workflowmanagement; Componentware; Gesch{\"a}ftsprozeßmodellierung}, language = {de} } @phdthesis{Lewerenz2000, author = {Lewerenz, Jana}, title = {Automatic generation of human computer interaction in heterogeneous and dynamic environments based on conceptual modelling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-000000063}, school = {BTU Cottbus - Senftenberg}, year = {2000}, abstract = {Today, developers of human-computer interaction increasingly face high expectations regarding the adaptivity of interaction. The World Wide Web is a fitting example of how diverse and 'fickle' the demands are that users, service providers, and even the available technical infrastructure place on interaction. In this thesis, a general framework for the development of interaction for such heterogeneous and dynamic interaction environments is created. The main focus is the provision of abstract constructs that make it possible to specify interaction abstractly, i.e., independently of the concrete properties of an individual interaction environment.
We then show how such an abstract specification can be used to automatically create human-computer interfaces which, because the automatic generation takes the current interaction environment into account, are tailored to the requirements of the current user, technical infrastructure, etc.}, subject = {Mensch-Maschine-Kommunikation; Generierung; Hardwareentwurf; Softwareentwicklung; Konzeptionelle Modellierung; Mensch-Maschine-Interaktion; Konzeptuelle Modellierung; Plattformunabh{\"a}ngigkeit}, language = {en} } @phdthesis{Feyer2003, author = {Feyer, Thomas}, title = {A component based approach to human computer interaction - specification, composition, and application to information services}, isbn = {3-89820-686-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:co1-opus-1524}, school = {BTU Cottbus - Senftenberg}, year = {2003}, abstract = {The discipline of software engineering is increasingly shifting from classical design and development tasks towards tasks concerning reuse, adaptation, and integration. Driving motivations for this shift are (i) decreasing development time and costs and (ii) increasing the quality of design results. Unfortunately, these new tasks are not yet sufficiently supported. While classical approaches to information systems design are quite well suited to design from scratch, they do not provide powerful concepts for reuse. In particular, methods for the encapsulation of dialog structures are commonly neglected. An approach that enables the reuse of dialog structures poses substantially different problems. This thesis formalizes such an approach, which is essentially based on models from three research areas: (i) interaction patterns, (ii) interaction specification, and (iii) component frameworks.}, subject = {Web Services; Mensch-Maschine-Kommunikation; Benutzerschnittstellenentwurfssystem; Komponente; Formale Spezifikationstechnik; Interaktion; Komponente; Muster; Komposition; Interaction; Component; Pattern; Composition}, language = {en} }