@workingpaper {643584,
  title = {An Honest Approach to Parallel Trends},
  year = {Working Paper},
  abstract = {This paper proposes robust inference methods for difference-in-differences and event-study designs that do not require that the parallel trends assumption holds exactly. Instead, the researcher must only impose restrictions on the possible differences in trends between the treated and control groups. Several common intuitions expressed in applied work can be captured by such restrictions, including the notion that pre-treatment differences in trends are informative about counterfactual post-treatment differences in trends. Our methodology then guarantees uniformly valid ("honest") inference when the imposed restrictions are satisfied. We first show that fixed-length confidence intervals have near-optimal expected length for a practically relevant class of restrictions. We next introduce a novel inference procedure that accommodates a wider range of restrictions, based on the observation that inference in our setting is equivalent to testing a system of moment inequalities with a large number of linear nuisance parameters. The resulting confidence sets are consistent and have optimal local asymptotic power for many parameter configurations. We recommend that researchers conduct sensitivity analyses to show what conclusions can be drawn under various restrictions on the possible differences in trends.},
  author = {Ashesh Rambachan and Jonathan Roth}
}

@workingpaper {665566,
  title = {When Is Parallel Trends Sensitive to Functional Form?},
  year = {Working Paper},
  abstract = {This paper assesses when the validity of difference-in-differences and related estimators depends on functional form. We provide a novel characterization: the parallel trends assumption holds under all monotonic transformations of the outcome if and only if a stronger "parallel trends"-type assumption holds on the entire distribution of potential outcomes. This assumption necessarily holds when treatment is (as if) randomly assigned, but will often be implausible in settings where randomization fails. We show further that the average treatment effect on the treated (ATT) is identified regardless of functional form if and only if the entire distribution of untreated outcomes is identified for the treated group. It is thus impossible to construct an estimator that is consistent (or unbiased) for the ATT regardless of functional form unless one imposes assumptions that identify the entire counterfactual distribution of untreated potential outcomes. Our results suggest that researchers who wish to point-identify the ATT should justify one of the following: (i) why treatment is randomly assigned, (ii) why the chosen functional form is correct to the exclusion of others, or (iii) a method for inferring the entire counterfactual distribution of untreated potential outcomes.},
  author = {Jonathan Roth and Pedro H.C. Sant{\textquoteright}Anna}
}

@workingpaper {663012,
  title = {Design-Based Uncertainty for Quasi-Experiments},
  year = {Working Paper},
  abstract = {Social scientists are often interested in estimating causal effects in settings where all units in the population are observed (e.g. all 50 US states). Design-based approaches, which view the realization of treatment assignments as the source of randomness, may be more appealing than standard sampling-based approaches in such contexts.
This paper develops a design-based theory of uncertainty suitable for quasi-experimental settings, in which the researcher estimates the treatment effect as if treatment were randomly assigned, but in reality treatment probabilities may depend in unknown ways on the potential outcomes. We first study the properties of the simple difference-in-means (SDIM) estimator. The SDIM is unbiased for a finite-population, design-based analog to the average treatment effect on the treated (ATT) if treatment probabilities are uncorrelated with the potential outcomes in a finite-population sense. We further derive expressions for the variance of the SDIM estimator and a central limit theorem under sequences of finite populations with growing sample size. We then show how our results can be applied to analyze the distribution and estimand of difference-in-differences (DiD) and two-stage least squares (2SLS) estimators from a design-based perspective when treatment is not completely randomly assigned.},
  author = {Ashesh Rambachan and Jonathan Roth}
}

@workingpaper {667961,
  title = {Why Do Sectoral Employment Programs Work? Evidence from WorkAdvance},
  year = {Working Paper},
  abstract = {This paper examines the evidence from randomized evaluations of sector-focused training programs that target low-wage workers and combine upfront screening, occupational and soft skills training, and wraparound services. The programs generate substantial and persistent earnings gains (11 to 40 percent) following training completion. Theoretical mechanisms for program impacts are explored for the WorkAdvance demonstration. Earnings gains are generated by getting participants into higher-wage jobs in higher-earning industries and occupations, not just by raising employment. Training in transferable and certifiable skills (likely under-provided owing to poaching concerns) and reductions in employment barriers to high-wage sectors for non-traditional workers appear to play key roles.},
  author = {Lawrence Katz and Jonathan Roth and Richard Hendra and Kelsey Schaberg}
}

@workingpaper {641866,
  title = {Inference for Linear Conditional Moment Inequalities},
  note = {Revision requested, Review of Economic Studies},
  year = {Working Paper},
  abstract = {We consider inference based on linear conditional moment inequalities, which arise in a wide variety of economic applications, including many structural models. We show that linear conditional structure greatly simplifies confidence set construction, allowing for computationally tractable projection inference in settings with nuisance parameters. Next, we derive least favorable critical values that avoid conservativeness due to projection. Finally, we introduce a conditional inference approach that ensures a strong form of insensitivity to slack moments, as well as a hybrid technique that combines the least favorable and conditional methods. Our conditional and hybrid approaches are new even in settings without nuisance parameters. We find good performance in simulations based on Wollmann (2018), especially for the hybrid approach.},
  author = {Isaiah Andrews and Jonathan Roth and Ariel Pakes}
}

@workingpaper {626449,
  title = {Pre-test with Caution: Event-study Estimates After Testing for Parallel Trends},
  year = {Working Paper},
  abstract = {Tests for pre-existing trends ("pre-trends") are a common way of assessing the plausibility of the parallel trends assumption in difference-in-differences and related research designs. This paper highlights some important limitations of pre-trends testing.
From a theoretical perspective, I analyze the distribution of conventional estimates and confidence intervals conditional on surviving a pre-test for pre-trends. I show that in non-pathological cases, the bias of conventional estimates conditional on passing a pre-test can be worse than the unconditional bias. Thus, pre-tests meant to mitigate bias and coverage issues in published work can in fact exacerbate them. I empirically investigate the practical relevance of these concerns in simulations based on a systematic review of recent papers in leading economics journals. I find that conventional pre-tests are often underpowered against plausible violations of parallel trends that produce bias comparable in magnitude to the estimated treatment effect. Distortions from pre-testing can also be substantial. Finally, I discuss alternative approaches that can improve upon the standard practice of relying on pre-trends testing.},
  author = {Jonathan Roth}
}

@workingpaper {546616,
  title = {Union Reform and Teacher Turnover: Evidence from Wisconsin{\textquoteright}s Act 10},
  year = {Working Paper},
  abstract = {This paper studies teacher attrition in Wisconsin following Act 10, a policy change that severely weakened teachers{\textquoteright} unions and capped wage growth for teachers. I document a sharp short-run increase in teacher turnover after the Act was passed, driven almost entirely by teachers over the minimum retirement age of 55, whose turnover rate doubled from 17 to 35 percent. Such teachers faced strong incentives to retire before the end of pre-existing collective bargaining agreements in order to secure collectively bargained retirement benefits (e.g. healthcare), which no longer fell under the scope of collective bargaining after the Act. I find much more modest long-run increases in teacher turnover, consistent with previous estimates of labor supply elasticities. I then attempt to evaluate the effect of the wave of retirements following Act 10 on education quality using grade-level value-added metrics. I find suggestive evidence that student academic performance increased in grades with teachers who retired following the reform, and I obtain similar results when instrumenting for retirement using the pre-existing age distribution of teachers. Differences in value-added between retirees and their replacements can potentially explain some, but not all, of the observed academic improvements.},
  author = {Jonathan Roth}
}

@article {641867,
  title = {Bias In, Bias Out? Evaluating the Folk Wisdom},
  journal = {1st Symposium on the Foundations of Responsible Computing (FORC 2020), LIPIcs},
  volume = {156},
  year = {2020},
  pages = {6:1-6:15},
  abstract = {We evaluate the folk wisdom that algorithmic decision rules trained on data produced by biased human decision-makers necessarily reflect this bias. We consider a setting where training labels are only generated if a biased decision-maker takes a particular action, and so "biased" training data arise due to discriminatory selection into the training data. In our baseline model, the more biased the decision-maker is against a group, the more the algorithmic decision rule favors that group. We refer to this phenomenon as bias reversal. We then clarify the conditions that give rise to bias reversal. Whether a prediction algorithm reverses or inherits bias depends critically on how the decision-maker affects the training data as well as the label used in training.
We illustrate our main theoretical results in a simulation study applied to the New York City Stop, Question and Frisk dataset.},
  url = {https://doi.org/10.4230/LIPIcs.FORC.2020.6},
  author = {Ashesh Rambachan and Jonathan Roth}
}