% Publication list (Eric S. Taylor) — cleaned from an auto-export:
% - entry types corrected (@workingpaper -> @unpublished, @booklet -> @incollection/@techreport/@article)
% - author names normalized to "Last, First"; page ranges use "--"; bare DOIs in `doi`
% - junk fields removed (month fields that contained a year; non-numeric `year` values moved to `note`)
% Citation keys are unchanged so existing citations still resolve.

@article{686416,
  author   = {Bell, Courtney and James, Jessalynn and Taylor, Eric S. and Wyckoff, James},
  title    = {Measuring returns to experience using supervisor ratings of observed performance: The case of classroom teachers},
  journal  = {Journal of Policy Analysis and Management},
  note     = {In press},
  doi      = {10.1002/pam.22584},
  abstract = {We study the returns to experience in teaching, estimated using supervisor ratings from classroom observations. We describe the assumptions required to interpret changes in observation ratings over time as the causal effect of experience on performance. We compare two difference-in-differences strategies: the two-way fixed effects estimator common in the literature, and an alternative which avoids potential bias arising from effect heterogeneity. Using data from Tennessee and Washington, DC, we show empirical tests relevant to assessing the identifying assumptions and substantive threats{\textemdash}e.g., leniency bias, manipulation, changes in incentives or job assignments{\textemdash}and find our estimates are robust to several threats.},
}
% NOTE(review): fill in year/volume/pages for 686416 once the issue is assigned.

@article{696156,
  author   = {Burgess, Simon and Rawal, Shenila and Taylor, Eric S.},
  title    = {Teachers{\textquoteright} use of class time and student achievement},
  journal  = {Economics of Education Review},
  volume   = {94},
  pages    = {102405},
  year     = {2023},
  doi      = {10.1016/j.econedurev.2023.102405},
  abstract = {We study teachers{\textquoteright} choices about how to allocate class time across different instructional activities, for example, lecturing, open discussion, or individual practice. Our data come from secondary schools in England, specifically classes preceding GCSE exams. Students score higher in math when their teacher devotes more class time to individual practice and assessment. In contrast, students score higher in English if there is more discussion and work with classmates. Class time allocation predicts test scores separate from the quality of the teacher{\textquoteright}s instruction during the activities. These results suggest opportunities to improve student achievement without changes in teachers{\textquoteright} skills.},
}

@article{632794,
  author   = {Aucejo, Esteban and Romano, Teresa and Taylor, Eric S.},
  title    = {Does evaluation change teacher effort and performance? Quasi-experimental evidence from a policy of retesting students},
  journal  = {Review of Economics and Statistics},
  volume   = {104},
  number   = {3},
  pages    = {417--430},
  year     = {2022},
  url      = {https://direct.mit.edu/rest/article/104/3/417/97737/Does-Evaluation-Change-Teacher-Effort-and},
  abstract = {We document measurable, lasting gains in student achievement caused by a change in teachers{\textquoteright} evaluation incentives. A short-lived rule created a discontinuity in teachers{\textquoteright} incentives when allocating effort across their assigned students: students who failed an initial end-of-year test were retested a few weeks later, and then only the higher of the two scores was used when calculating the teacher{\textquoteright}s evaluation score. One year later, long after the discontinuity in incentives had ended, retested students scored 0.03$\sigma$ higher than non-retested students. Otherwise identical students were treated differently by teachers because of evaluation incentives, despite arguably equal returns to teacher effort.},
}

@article{632795,
  author   = {Burgess, Simon and Rawal, Shenila and Taylor, Eric S.},
  title    = {Teacher peer observation and student test scores: Evidence from a field experiment in {English} secondary schools},
  journal  = {Journal of Labor Economics},
  volume   = {39},
  number   = {4},
  pages    = {1155--1186},
  year     = {2021},
  doi      = {10.1086/712997},
  abstract = {This paper reports on a field experiment in 82 high schools trialing a low-cost intervention in schools{\textquoteright} operations: teachers working in the same school observed and scored each other{\textquoteright}s teaching. Students in treatment schools scored 0.07$\sigma$ higher on math and English exams. Teachers were further randomly assigned to roles{\textemdash}observer and observee{\textemdash}and students of both types benefited, observers{\textquoteright} students perhaps more so. Doubling the number of observations produced no difference in student outcomes. Treatment effects were larger for otherwise low-performing teachers.},
}

@article{377726,
  author   = {Papay, John P. and Taylor, Eric S. and Tyler, John H. and Laski, Mary},
  title    = {Learning job skills from colleagues at work: Evidence from a field experiment using teacher performance data},
  journal  = {American Economic Journal: Economic Policy},
  volume   = {12},
  number   = {1},
  pages    = {359--388},
  year     = {2020},
  doi      = {10.1257/pol.20170709},
  url      = {https://www.aeaweb.org/articles?id=10.1257/pol.20170709},
  abstract = {We study a program designed to encourage learning from coworkers among school teachers. In an experiment, we document gains in job performance when high- and low-skilled teachers are paired and asked to work together on improving their skills. Pairs are matched on specific skills measured in prior evaluations. Each pair includes a target teacher who scores low in one or more of nineteen skills, and a partner who scores high in (many of) the target{\textquoteright}s deficient skills. Student achievement improved 0.12 standard deviations in low-skilled teachers{\textquoteright} classrooms. Improvements are likely the result of target teachers learning skills from their partner.},
}

@article{518701,
  author   = {Taylor, Eric S.},
  title    = {Skills, job tasks, and productivity in teaching: Evidence from a randomized trial of instruction practices},
  journal  = {Journal of Labor Economics},
  volume   = {36},
  number   = {3},
  pages    = {711--742},
  year     = {2018},
  doi      = {10.1086/696144},
  url      = {https://www.journals.uchicago.edu/doi/abs/10.1086/696144},
  abstract = {I study how teachers{\textquoteright} assigned job tasks{\textemdash}the basic practices they are asked to use in the classroom{\textemdash}affect the returns to math skills in teacher productivity. The results demonstrate the importance of distinguishing between workers{\textquoteright} skills and workers{\textquoteright} job tasks. I examine a randomized trial of different approaches to teaching math, each approach codified in a set of day-to-day tasks. Teachers were tested to measure their math skills. Teacher productivity{\textemdash}measured with student test scores{\textemdash}is increasing in math skills when teachers use conventional {\textquotedblleft}direct instruction{\textquotedblright} practices: explaining and modeling math rules and procedures. The relationship is weaker, perhaps negative, for newer {\textquotedblleft}student-led{\textquotedblright} instruction tasks.},
}

@article{377736,
  author   = {Jacob, Brian A. and Rockoff, Jonah E. and Taylor, Eric S. and Lindy, Benjamin and Rosen, Rachel},
  title    = {Teacher applicant hiring and teacher performance: Evidence from {DC Public Schools}},
  journal  = {Journal of Public Economics},
  volume   = {166},
  pages    = {81--97},
  year     = {2018},
  url      = {https://www.sciencedirect.com/science/article/abs/pii/S0047272718301555},
  abstract = {Selecting more productive employees among a pool of job applicants can be a cost-effective means of improving organizational performance and may be particularly important in the public sector. We study the relationship among applicant characteristics, hiring outcomes, and job performance for teachers in the Washington DC Public Schools. Applicants{\textquoteright} academic background (e.g., undergraduate GPA) is essentially uncorrelated with hiring. Screening measures (written assessments, interviews, and sample lessons) help applicants get jobs by placing them on a list of recommended candidates, but they are only weakly associated with the likelihood of being hired conditional on making the list. Yet both academic background and screening measures strongly predict teacher job performance, suggesting considerable scope for improving schools via the selection process.},
}

@article{518706,
  author   = {Bettinger, Eric P. and Fox, Lindsay and Loeb, Susanna and Taylor, Eric S.},
  title    = {Virtual classrooms: How online college courses affect student success},
  journal  = {American Economic Review},
  volume   = {107},
  number   = {9},
  pages    = {2855--2875},
  year     = {2017},
  doi      = {10.1257/aer.20151193},
  url      = {https://www.aeaweb.org/articles?id=10.1257/aer.20151193},
  abstract = {Online college courses are a rapidly expanding feature of higher education, yet little research identifies their effects relative to traditional in-person classes. Using an instrumental variables approach, we find that taking a course online, instead of in-person, reduces student success and progress in college. Grades are lower both for the course taken online and in future courses. Students are less likely to remain enrolled at the university. These estimates are local average treatment effects for students with access to both online and in-person options; for other students online classes may be the only option for accessing college-level courses.},
}

@article{518696,
  author   = {Bettinger, Eric P. and Doss, Christopher and Loeb, Susanna and Rogers, Aaron and Taylor, Eric S.},
  title    = {The effects of class size in online college courses: Experimental evidence},
  journal  = {Economics of Education Review},
  volume   = {58},
  pages    = {68--85},
  year     = {2017},
  url      = {http://www.sciencedirect.com/science/article/pii/S0272775716302473},
  abstract = {Class size is a first-order consideration in the study of education cost and effectiveness. Yet little is known about the effects of class size on student outcomes in online college classes, even though online courses have become commonplace in many institutions of higher education. We study a field experiment in which college students were quasi-randomly assigned to either regular sized classes or slightly larger classes. Regular classes had, on average, 31 students and treatment classes were, on average, ten percent larger. The experiment was conducted at DeVry University, one of the nation{\textquoteright}s largest for-profit postsecondary institutions, and included over 100,000 student course enrollments in nearly 4,000 classes across 111 different undergraduate and graduate courses. We examine class size effects on student success in the course and subsequent persistence in college. We find little evidence of effects on average or for a range of course types. Given the large sample, our estimates are precise, suggesting that small class size changes have little impact in online settings.},
}

@article{377741,
  author   = {Bettinger, Eric P. and Long, Bridget Terry and Taylor, Eric S.},
  title    = {When inputs are outputs: The case of graduate student instructors},
  journal  = {Economics of Education Review},
  volume   = {52},
  pages    = {63--76},
  year     = {2016},
  url      = {http://www.sciencedirect.com/science/article/pii/S027277571630036X},
  abstract = {We examine graduate student teaching as an input to two production processes: the education of undergraduates and the development of graduate students themselves. Using fluctuations in full-time faculty availability as an instrument, we find undergraduates are more likely to major in a subject if their first course in the subject was taught by a graduate student, a result opposite of estimates that ignore selection. Additionally, graduate students who teach more frequently graduate earlier and are more likely to subsequently be employed by a college or university.},
}

@article{255226,
  author   = {Taylor, Eric S.},
  title    = {Spending more of the school day in math class: Evidence from a regression discontinuity in middle school},
  journal  = {Journal of Public Economics},
  volume   = {114},
  pages    = {162--181},
  year     = {2014},
  url      = {http://www.sciencedirect.com/science/article/pii/S004727271400142X},
  abstract = {For students whose math skills lag expectations, public schools often increase the fraction of the school day spent on math instruction. Studying middle-school students and using regression discontinuity methods, I estimate the causal effect of requiring two math classes{\textemdash}one remedial, one regular{\textemdash}instead of just one class. Math achievement grows much faster under the requirement, 0.16{\textendash}0.18 student standard deviations. Yet, one year after returning to a regular one-class schedule, the initial gains decay by as much as half, and two years later just one-third of the initial treatment effect remains. This pattern of decaying effects over time mirrors other educational interventions{\textemdash}assignment to a more skilled teacher, reducing class size, retaining students{\textemdash}but spending more time on math carries different costs. One cost is notable, more time in math crowds out instruction in other subjects.},
}

@article{255221,
  author   = {Taylor, Eric S. and Tyler, John H.},
  title    = {The effect of evaluation on teacher performance},
  journal  = {American Economic Review},
  volume   = {102},
  number   = {7},
  pages    = {3628--3651},
  year     = {2012},
  doi      = {10.1257/aer.102.7.3628},
  url      = {https://www.aeaweb.org/articles.php?doi=10.1257/aer.102.7.3628},
  abstract = {Teacher performance evaluation has become a dominant theme in school reform efforts. Yet, whether evaluation changes the performance of teachers, the focus of this paper, is unknown. Instead, evaluation has largely been studied as an input to selective dismissal decisions. We study mid-career teachers for whom we observe an objective measure of productivity -- value-added to student achievement -- before, during, and after evaluation. We find teachers are more productive in post-evaluation years, with the largest improvements among teachers performing relatively poorly ex-ante. The results suggest teachers can gain information from evaluation and subsequently develop new skills, increase long-run effort, or both.},
}

@article{255216,
  author   = {Rockoff, Jonah E. and Staiger, Douglas O. and Kane, Thomas J. and Taylor, Eric S.},
  title    = {Information and employee evaluation: Evidence from a randomized intervention in public schools},
  journal  = {American Economic Review},
  volume   = {102},
  number   = {7},
  pages    = {3184--3213},
  year     = {2012},
  doi      = {10.1257/aer.102.7.3184},
  url      = {https://www.aeaweb.org/articles.php?doi=10.1257/aer.102.7.3184},
  abstract = {We examine how employers learn about worker productivity in a randomized pilot experiment which provided objective estimates of teacher performance to school principals. We test several hypotheses that support a simple Bayesian learning model with imperfect information. First, the correlation between performance estimates and prior beliefs rises with more precise objective estimates and more precise subjective priors. Second, new information exerts greater influence on posterior beliefs when it is more precise and when priors are less precise. Employer learning affects job separation and productivity in schools, increasing turnover for teachers with low performance estimates and producing small test score improvements. (JEL D83, I21, J24, J45)},
}

@article{255211,
  author   = {Kane, Thomas J. and Taylor, Eric S. and Tyler, John H. and Wooten, Amy L.},
  title    = {Identifying effective classroom practices using student achievement data},
  journal  = {Journal of Human Resources},
  volume   = {46},
  number   = {3},
  pages    = {587--613},
  year     = {2011},
  url      = {http://jhr.uwpress.org/content/46/3/587.full.pdf},
  abstract = {Research continues to find large differences in student achievement gains across teachers{\textquoteright} classrooms. The variability in teacher effectiveness raises the stakes on identifying effective teachers and teaching practices. This paper combines data from classroom observations of teaching practices and measures of teachers{\textquoteright} ability to improve student achievement as one contribution to these questions. We find that observation measures of teaching effectiveness are substantively related to student achievement growth and that some observed teaching practices predict achievement more than other practices. Our results provide information for both individual teacher development efforts, and the design of teacher evaluation systems.},
}

% Chapter in an edited handbook: @incollection, not @booklet with the book in `journal`.
@incollection{696155,
  author    = {Taylor, Eric S.},
  title     = {Teacher Evaluation and Training},
  booktitle = {Handbook of the Economics of Education},
  volume    = {7},
  editor    = {Hanushek, Eric A. and Machin, Stephen and Woessmann, Ludger},
  publisher = {Elsevier},
  pages     = {61--141},
  year      = {2023},
  doi       = {10.1016/bs.hesedu.2023.03.002},
  abstract  = {Evaluation and training are important features of the employment relationship between teachers and the schools they work for. The first feature, evaluation, involves performance measures and often performance incentives linked to those measures, like bonuses or the threat of dismissal. This chapter reviews research on whether and how evaluation and incentives change teaching, including unintended effects. Potential mechanisms include changes in a teacher{\textquoteright}s effort or skills, or changes in the composition of the teacher workforce through selection. Many (quasi-)experiments document increases in the measures used to determine rewards or consequences for teachers, but it is less clear whether those increases represent improvements in student learning or welfare. Research on the second feature, training, typically focuses on formal training programs, where evidence of benefits is inconsistent at best. This chapter reviews evidence on both formal training, as well as informal ways in which teachers appear to learn new skills at work.},
}

% Policy report: @techreport with the issuing body in `institution`.
@techreport{626277,
  author      = {Lovison, Virginia and Taylor, Eric S.},
  title       = {Can teacher evaluation programs improve teaching?},
  institution = {Getting Down to Facts II, Stanford University},
  year        = {2018},
  url         = {https://gettingdowntofacts.com/sites/default/files/2018-09/GDTFII_Report_Taylor.pdf},
}

% Chapter in an edited volume: @incollection.
@incollection{255231,
  author    = {Loeb, Susanna and Paglayan, Agustina and Taylor, Eric S.},
  title     = {Understanding human resources in broad-access higher education},
  booktitle = {Remaking College: The Changing Ecology of Higher Education},
  editor    = {Stevens, Mitchell and Kirst, Michael},
  publisher = {Stanford University Press},
  address   = {Stanford, CA},
  year      = {2014},
  url       = {https://www.sup.org/books/title/?id=23137},
}

% Education Next pieces have volume/number/pages: @article fits better than @booklet.
@article{310316,
  author  = {Taylor, Eric S. and Tyler, John H.},
  title   = {Can teacher evaluation improve teaching?},
  journal = {Education Next},
  volume  = {12},
  number  = {4},
  pages   = {78--84},
  year    = {2012},
  url     = {http://educationnext.org/files/ednext_20124_taylortyler.pdf},
}

@article{312426,
  author  = {Kane, Thomas J. and Taylor, Eric S. and Tyler, John H. and Wooten, Amy L.},
  title   = {Evaluating teacher effectiveness},
  journal = {Education Next},
  volume  = {11},
  number  = {3},
  pages   = {55--60},
  year    = {2011},
  url     = {http://educationnext.org/files/ednext_20113_research_kane.pdf},
}

@article{255236,
  author  = {Tyler, John H. and Taylor, Eric S. and Kane, Thomas J. and Wooten, Amy L.},
  title   = {Using student performance data to identify effective classroom practices},
  journal = {American Economic Review, Papers and Proceedings},
  volume  = {100},
  number  = {2},
  pages   = {256--260},
  year    = {2010},
  doi     = {10.1257/aer.100.2.256},
  url     = {http://pubs.aeaweb.org/doi/pdfplus/10.1257/aer.100.2.256},
}

% Working papers: @unpublished (standard type; `year = {Working Paper}` was invalid).
@unpublished{696157,
  author   = {Taylor, Eric S.},
  title    = {Employee evaluation and skill investments: Evidence from public school teachers},
  note     = {NBER Working Paper No. 30687},
  url      = {https://www.nber.org/papers/w30687},
  abstract = {When an employee expects repeated evaluation and performance incentives over time, the potential future rewards create an incentive to invest in building relevant skills. Because new skills benefit job performance, the effects of an evaluation program can persist after the rewards end or even anticipate the start of rewards. I test for persistence and anticipation effects, along with more conventional predictions, using a quasi-experiment in Tennessee schools. Performance improves with new evaluation measures, but gains are larger when the teacher expects future rewards linked to future scores. Performance rises further when incentives start and remains higher even after incentives end.},
}

@unpublished{310311,
  author   = {Taylor, Eric S.},
  title    = {New Technology and Teacher Productivity},
  note     = {Working paper},
  abstract = {I study the effects of a labor-replacing computer technology on the productivity of classroom teachers. Focusing on one occupation{\textemdash}and a setting where both workers and their job responsibilities remain fixed{\textemdash}provides an opportunity to examine the heterogeneity of effects on individual productivity. In a series of field-experiments, teachers were provided computer-aided instruction (CAI) software for use in their classrooms; CAI provides individualized tutoring and practice to students one-on-one with the computer acting as the teacher. In math classes, CAI reduces by one-fifth the variance of teacher productivity, as measured by student test score gains. The smaller variance comes both from productivity improvements for otherwise low-performing teachers, but also losses among high-performers. The change in productivity partly reflects changes in teachers{\textquoteright} level of work effort and teachers{\textquoteright} decisions about how to allocate class time. How computers affect teacher decisions and productivity is immediately relevant to both ongoing education policy debates about teaching quality and the day-to-day management of a large workforce.},
}