@inproceedings{icml2020_3096,
abstract = {A dynamic treatment regime (DTR) consists of a sequence of decision rules, one per stage of intervention, that dictates how treatments are assigned to patients based on the evolving history of treatments and covariates. These regimes are particularly effective for managing chronic disorders and are arguably one of the critical ingredients underlying more personalized decision-making systems. All reinforcement learning algorithms for finding the optimal DTR in online settings suffer $O(\sqrt{|D_{X,S}| T})$ regret on some environments, where $T$ is the number of experiments and $D_{X,S}$ is the domain of treatments $X$ and covariates $S$. This implies that $T = O(|D_{X,S}|)$ trials are required to generate an optimal DTR. In many applications, the domains of $X$ and $S$ could be so enormous that the time required to ensure appropriate learning may be unattainable. We show that, if the causal diagram of the underlying environment is provided, one could achieve regret that is exponentially smaller than $|D_{X,S}|$. In particular, we develop two online algorithms that satisfy such regret bounds by exploiting the causal structure underlying the DTR: one based on the principle of optimism in the face of uncertainty (OFU-DTR), and the other on posterior sampling learning (PS-DTR). Finally, we introduce efficient methods to accelerate these online learning procedures by leveraging the abundant, yet biased, observational (non-experimental) data.},
author = {Zhang, Junzhe and Bareinboim, Elias},
booktitle = {Proceedings of the 37th International Conference on Machine Learning},
pages = {5673--5683},
title = {Designing Optimal Dynamic Treatment Regimes: A Causal Reinforcement Learning Approach},
year = {2020}
}