@article{JankinBenoitLaver,
  author   = {Jankin, Slava and Benoit, Kenneth and Laver, Michael},
  title    = {Treating Words as Data with Error: Estimating Uncertainty in Text Statements of Policy Positions},
  journal  = {American Journal of Political Science},
  volume   = {53},
  number   = {2},
  year     = {2009},
  pages    = {495--513},
  issn     = {0092-5853},
  doi      = {10.1111/j.1540-5907.2009.00383.x},
  abstract = {Political text offers extraordinary potential as a source of information about the policy positions of political actors. Despite recent advances in computational text analysis, human interpretative coding of text remains an important source of text-based data, ultimately required to validate more automatic techniques. The profession's main source of cross-national, time-series data on party policy positions comes from the human interpretative coding of party manifestos by the Comparative Manifesto Project (CMP). Despite widespread use of these data, the uncertainty associated with each point estimate has never been available, undermining the value of the dataset as a scientific resource. We propose a remedy. First, we characterize processes by which CMP data are generated. These include inherently stochastic processes of text authorship, as well as of the parsing and coding of observed text by humans. Second, we simulate these error-generating processes by bootstrapping analyses of coded quasi-sentences. This allows us to estimate precise levels of nonsystematic error for every category and scale reported by the CMP for its entire set of 3,000-plus manifestos. Using our estimates of these errors, we show how to correct biased inferences, in recent prominently published work, derived from statistical analyses of error-contaminated CMP data.},
  language = {en}
}

@article{JankinLaverBenoit,
  author   = {Jankin, Slava and Laver, Michael and Benoit, Kenneth},
  title    = {Coder Reliability and Misclassification in the Human Coding of Party Manifestos},
  journal  = {Political Analysis},
  volume   = {20},
  number   = {1},
  year     = {2012},
  pages    = {78--91},
  doi      = {10.1093/pan/mpr047},
  abstract = {The Comparative Manifesto Project (CMP) provides the only time series of estimated party policy positions in political science and has been extensively used in a wide variety of applications. Recent work (e.g., Benoit, Laver, and Mikhaylov 2009; Klingemann et al. 2006) focuses on nonsystematic sources of error in these estimates that arise from the text generation process. Our concern here, by contrast, is with error that arises during the text coding process, since nearly all manifestos are coded only once by a single coder. First, we discuss reliability and misclassification in the context of hand-coded content analysis methods. Second, we report results of a coding experiment that used trained human coders to code sample manifestos provided by the CMP, allowing us to estimate the reliability of both coders and coding categories. Third, we compare our test codings to the published CMP "gold standard" codings of the test documents to assess accuracy and produce empirical estimates of a misclassification matrix for each coding category. Finally, we demonstrate the effect of coding misclassification on the CMP's most widely used index, its left-right scale. Our findings indicate that misclassification is a serious and systemic problem with the current CMP data set and coding process, suggesting the CMP scheme should be significantly simplified to address reliability issues.},
  language = {en}
}

@article{DaublerBenoitJankinetal.,
  author   = {Daubler, Thomas and Benoit, Kenneth and Jankin, Slava and Laver, Michael},
  title    = {Natural Sentences as Valid Units for Coded Political Texts},
  journal  = {British Journal of Political Science},
  volume   = {42},
  number   = {4},
  year     = {2012},
  pages    = {937--951},
  doi      = {10.1017/S0007123412000105},
  abstract = {All methods for analyzing text require the identification of a fundamental unit of analysis. In expert-coded content analysis schemes such as the Comparative Manifesto Project, this unit is the 'quasi-sentence': a natural sentence or a part of a sentence judged by the coder to have an independent component of meaning. Because they are subjective constructs identified by individual coders, however, quasi-sentences make text analysis fundamentally unreliable. The justification for quasi-sentences is a supposed gain in coding validity. We show that this justification is unfounded: using quasi-sentences does not produce valuable additional information in characterizing substantive political content. Using natural sentences as text units, by contrast, delivers perfectly reliable unitization with no measurable loss in content validity of the resulting estimates.},
  language = {en}
}

@article{JankinBenoitConwayetal.,
  author   = {Jankin, Slava and Benoit, Kenneth and Conway, Drew and Laver, Michael},
  title    = {Crowd-Sourced Text Analysis: Reproducible and agile production of political data},
  journal  = {American Political Science Review},
  volume   = {110},
  number   = {2},
  year     = {2016},
  pages    = {278--295},
  issn     = {0003-0554},
  doi      = {10.1017/S0003055416000058},
  abstract = {Empirical social science often relies on data that are not observed in the field, but are transformed into quantitative variables by expert researchers who analyze and interpret qualitative raw sources. While generally considered the most valid way to produce data, this expert-driven process is inherently difficult to replicate or to assess on grounds of reliability. Using crowd-sourcing to distribute text for reading and interpretation by massive numbers of non-experts, we generate results comparable to those using experts to read and interpret the same texts, but do so far more quickly and flexibly. Crucially, the data we collect can be reproduced and extended transparently, making crowd-sourced datasets intrinsically reproducible. This focuses researchers' attention on the fundamental scientific objective of specifying reliable and replicable methods for collecting the data needed, rather than on the content of any particular dataset. We also show that our approach works straightforwardly with different types of political text, written in different languages. While findings reported here concern text analysis, they have far-reaching implications for expert-generated data in the social sciences.},
  language = {en}
}

@article{LoweBenoitJankinetal.,
  author   = {Lowe, Will and Benoit, Kenneth and Jankin, Slava and Laver, Michael},
  title    = {Scaling Policy Preferences From Coded Political Texts},
  journal  = {Legislative Studies Quarterly},
  volume   = {36},
  number   = {1},
  year     = {2011},
  pages    = {123--155},
  doi      = {10.1111/j.1939-9162.2010.00006.x},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:b1570-opus4-27945},
  abstract = {Scholars estimating policy positions from political texts typically code words or sentences and then build left-right policy scales based on the relative frequencies of text units coded into different categories. Here we reexamine such scales and propose a theoretically and linguistically superior alternative based on the logarithm of odds-ratios. We contrast this scale with the current approach of the Comparative Manifesto Project (CMP), showing that our proposed logit scale avoids widely acknowledged flaws in previous approaches. We validate the new scale using independent expert surveys. Using existing CMP data, we show how to estimate more distinct policy dimensions, for more years, than has been possible before, and make this dataset publicly available. Finally, we draw some conclusions about the future design of coding schemes for political texts.},
  language = {en}
}

@article{BenoitLaverLoweetal.,
  author   = {Benoit, Kenneth and Laver, Michael and Lowe, Will and Jankin, Slava},
  title    = {How to scale coded text units without bias: A response to Gemenis},
  journal  = {Electoral Studies},
  volume   = {31},
  number   = {3},
  year     = {2012},
  pages    = {605--608},
  doi      = {10.1016/j.electstud.2012.05.004},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:b1570-opus4-28033},
  abstract = {Coding non-manifesto documents as if they were genuine policy platforms produced at election time clearly raises serious issues with error when these codings are used in the standard manner to estimate left-right policy positions. In addition to the long-term solution of improving the document base of the Manifesto Project identified by Gemenis (2012), we argue that immediate gains in manifesto-based estimates of policy positions can be realised by using the confrontational logit scales from Lowe et al. (2011), which address the problems of scale content and scale construction that are exacerbated by, but not unique to, the problems found in proxy documents.},
  language = {en}
}
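% Note (not part of the bibliographic records): the LoweBenoitJankinetal. and
% BenoitLaverLoweetal. entries above both refer to left-right scales built from
% the logarithm of odds-ratios of coded text units. The LaTeX sketch below
% illustrates such a logit scale, following the description in those abstracts;
% the symbols R and L are illustrative, and the articles should be consulted
% for the exact estimator and its standard error.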
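% \[
%   \hat{\theta} = \log\frac{R + 0.5}{L + 0.5}
%                = \log(R + 0.5) - \log(L + 0.5)
% \]
% where $R$ and $L$ count the text units coded into the right and left
% categories of a policy dimension; adding 0.5 to each count keeps the
% estimate finite when either count is zero.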