@article{LengDimmery,
  author   = {Leng, Yan and Dimmery, Drew},
  title    = {Calibration of Heterogeneous Treatment Effects in Randomized Experiments},
  journal  = {Information Systems Research},
  doi      = {10.1287/isre.2021.0343},
  abstract = {Machine learning is commonly used to estimate the heterogeneous treatment effects (HTEs) in randomized experiments. Using large-scale randomized experiments on the Facebook and Criteo platforms, we observe substantial discrepancies between machine learning-based treatment effect estimates and difference-in-means estimates directly from the randomized experiment. This paper provides a two-step framework for practitioners and researchers to diagnose and rectify this discrepancy. We first introduce a diagnostic tool to assess whether bias exists in the model-based estimates from machine learning. If bias exists, we then offer a model-agnostic method to calibrate any HTE estimates to known, unbiased, subgroup difference-in-means estimates, ensuring that the sign and magnitude of the subgroup estimates approximate the model-free benchmarks. This calibration method requires no additional data and can be scaled for large data sets. To highlight potential sources of bias, we theoretically show that this bias can result from regularization and further use synthetic simulation to show biases result from misspecification and high-dimensional features. We demonstrate the efficacy of our calibration method using extensive synthetic simulations and two real-world randomized experiments. We further demonstrate the practical value of this calibration in three typical policy-making settings: a prescriptive, budget-constrained optimization framework; a setting seeking to maximize multiple performance indicators; and a multitreatment uplift modeling setting.},
  language = {en}
}

@article{AllcottGentzkowMasonetal.,
  author   = {Allcott, Hunt and Gentzkow, Matthew and Mason, Winter and Wilkins, Arjun and Barber{\'a}, Pablo and Brown, Taylor and Cisneros, Juan Carlos and Crespo-Tenorio, Adriana and Dimmery, Drew and Freelon, Deen and Gonz{\'a}lez-Bail{\'o}n, Sandra and Guess, Andrew M. and Kim, Young Mie and Lazer, David and Malhotra, Neil and Moehler, Devra and Nair-Desai, Sameer and Nait El Barj, Houda and Nyhan, Brendan and Paixao de Queiroz, Ana Carolina and Pan, Jennifer and Settle, Jaime and Thorson, Emily and Tromble, Rebekah and Velasco Rivera, Carlos and Wittenbrink, Benjamin and Wojcieszak, Magdalena and Zahedian, Saam and Franco, Annie and Kiewiet de Jonge, Chad and Stroud, Natalie Jomini and Tucker, Joshua A.},
  title    = {The effects of Facebook and Instagram on the 2020 election: A deactivation experiment},
  journal  = {Proceedings of the National Academy of Sciences},
  volume   = {121},
  number   = {21},
  doi      = {10.1073/pnas.2321584121},
  abstract = {We study the effect of Facebook and Instagram access on political beliefs, attitudes, and behavior by randomizing a subset of 19,857 Facebook users and 15,585 Instagram users to deactivate their accounts for 6 wk before the 2020 U.S. election. We report four key findings. First, both Facebook and Instagram deactivation reduced an index of political participation (driven mainly by reduced participation online). Second, Facebook deactivation had no significant effect on an index of knowledge, but secondary analyses suggest that it reduced knowledge of general news while possibly also decreasing belief in misinformation circulating online. Third, Facebook deactivation may have reduced self-reported net votes for Trump, though this effect does not meet our preregistered significance threshold. Finally, the effects of both Facebook and Instagram deactivation on affective and issue polarization, perceived legitimacy of the election, candidate favorability, and voter turnout were all precisely estimated and close to zero.},
  language = {en}
}