@misc{WinkelmannSchuettevonKleist,
  author   = {Winkelmann, Stefanie and Sch{\"u}tte, Christof and von Kleist, Max},
  title    = {Markov Control Processes with Rare State Observation: Theory and Application to Treatment Scheduling in HIV-1},
  issn     = {1438-0064},
  doi      = {10.4310/CMS.2014.v12.n5.a4},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41955},
  abstract = {Markov Decision Processes (MDPs) and Partially Observable MDPs (POMDPs) are used for modelling situations in which the evolution of a process is partly random and partly controllable. These MDP theories allow for computing the optimal control policy for processes that can be observed continuously or frequently, even if only partially. However, they cannot be applied if state observation is very costly and therefore rare (in time). We present a novel MDP theory for rare, costly observations and derive the corresponding Bellman equation. In the new theory, state information can be obtained at a particular cost after certain, rather long time intervals. The resulting information costs enter into the total cost and thus into the optimization criterion. This approach applies to many real-world problems, particularly in the medical context, where the medical condition is examined rather rarely because examination costs are high. At the same time, the approach allows for efficient numerical realization. We demonstrate the usefulness of the novel theory by determining, from the national economic perspective, optimal therapeutic policies for the treatment of the human immunodeficiency virus (HIV) in resource-rich and resource-poor settings. Based on the developed theory and models, we discover that available drugs may not be utilized efficiently in resource-poor settings due to exorbitant diagnostic costs.},
  language = {en}
}

@misc{Winkelmann,
  author   = {Winkelmann, Stefanie},
  title    = {Markov Control with Rare State Observation: Average Optimality},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-60981},
  abstract = {This paper investigates the criterion of long-term average costs for a Markov decision process (MDP) which is not permanently observable. Each observation of the process incurs a fixed amount of \textit{information costs}, which enter the considered performance criterion and preclude arbitrarily frequent state testing. Choosing the \textit{rare} observation times is part of the control procedure. In contrast to the theory of partially observable Markov decision processes, we consider an arbitrary continuous-time Markov process on a finite state space without further restrictions on the dynamics or the type of interaction. Based on the original Markov control theory, we redefine the control model and the average cost criterion for the setting of information costs. We analyze the constant of average costs for the case of ergodic dynamics and present an optimality equation which characterizes the optimal choice of control actions and observation times. For this purpose, we construct an equivalent freely observable MDP and translate the well-known results from the original theory to the new setting.},
  language = {en}
}