anonymous-submission-acl2025 committed on
Commit 48609bb · 1 Parent(s): 1a79dcb
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
1/replication_package/README.txt ADDED
@@ -0,0 +1,152 @@
+ ### README for Herzog, Baron, and Gibbons (Forthcoming), "Anti-Normative
+ ### Messaging, Group Cues, and the Nuclear Ban Treaty"; forthcoming at The
+ ### Journal of Politics.
+
+ ### This README details instructions and files pertaining to the survey, data,
+ ### and analysis code for replication purposes. Please direct inquiries to
+
+ ## Meta information and instructions.
+ # Performance assessments:
+ - Measured total run time (seconds), using
+       R CMD BATCH run_hbg_replication.R 2>&1 replication_out.out
+   - 137.820 (see the run_time outfile for machine-specific statistics).
+ - Hardware used.
+   - Lenovo ThinkPad X1 Carbon 5th Generation;
+   - Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz;
+   - Physical Memory Array; Maximum Capacity 16GB.
+ - Operating system used.
+   - Linux Mint 19 Tara Cinnamon 64-bit (4.10.0-38-generic).
+
+ # Dependencies:
+ 1.) R version 3.6.3 (2020-02-29) -- "Holding the Windsock."
+   - Required packages.
+     - plyr
+     - car
+     - anesrake
+     - sandwich
+ 2.) LaTeX (for typesetting tabular output).
+   - Required and recommended packages.
+     - array
+     - booktabs
+     - float
+     - makecell
+     - multirow
+     - siunitx
+
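+ - If any of the R packages above are missing, they may first be installed from
+   CRAN; a convenience sketch, not part of the original package:
+       install.packages(c("plyr", "car", "anesrake", "sandwich"))
+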
+ # Instructions:
+ 1.) Set working directory to the replication-file parent directory.
+   - All scripts assume ~/Downloads/hbg_replication as the parent directory.
+ 2.) For all operating systems, the scripts/run_hbg_replication.R script may be
+     executed in an R instance.
+   - Open the run_hbg_replication.R script in R.
+   - Run all commands in the console.
+ 3.) For UNIX/UNIX-like systems (macOS, Linux, Windows 10 Subsystem for Linux),
+     it is recommended to run the script in a terminal instance.
+   - Enter either
+       R CMD BATCH scripts/run_hbg_replication.R 2>&1 cli_script.out
+     which will produce an outfile containing command-line interface output in
+     the cli_script.out outfile; or,
+       Rscript scripts/run_hbg_replication.R
+     though Rscript will not echo output.
+ 4.) Commands may also be run in an interactive R session without use of
+     run_hbg_replication.R, e.g., in RStudio (see the sketch below).
+   - The working directory will have to be set manually; in the R console, enter
+       setwd("~/Downloads/hbg_replication")
+   - The output directory will also need to be created separately; once the
+     working directory has been set to the parent directory, in the R console,
+     enter
+       dir.create("output")
+
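+ - Putting step 4.) together, one possible interactive session is sketched
+   below; it assumes the default parent directory, and the exact set of scripts
+   sourced by run_hbg_replication.R may differ:
+       setwd("~/Downloads/hbg_replication")
+       dir.create("output", showWarnings = FALSE)
+       source("scripts/hbg_cleaning.R")
+       source("scripts/hbg_analysis.R")
+       source("scripts/hbg_group_cue.R")
+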
+ ## Directories and files.
+ # ./meta:
+ 1.) hbg_instrument.pdf
+   - Herzog, Baron, and Gibbons (Forthcoming) survey instrument.
+   - The instrument does not describe randomization; treatment assignment was
+     randomized via Qualtrics embedded data, using Qualtrics' internal "Evenly
+     Present Elements" algorithm. Some answer choice options were also
+     randomized in order to avoid ordering effects; questions employing
+     internal randomization include pid3, join_tpnw, and the row order of the
+     attitudinal outcomes battery.
+ 2.) hbg_codebook.txt
+   - Herzog, Baron, and Gibbons codebook.
+   - Details coding values for embedded data and survey questions in
+     all included data files.
+   - Notes variable recoding values used in the cleaned experimental data
+     (tpnw_data.csv), used for analysis.
+ 3.) hbg_pap.pdf
+   - Herzog, Baron, and Gibbons pre-analysis plan.
+   - Details all analysis decisions, per the research design pre-registered
+     with EGAP prior to collecting experimental data.
+
+ # ./data:
+ 1.) tpnw_aware_raw.csv
+   - Data from the Herzog, Baron, and Gibbons YouGov study.
+   - Note that some variables have been excluded as they are used in separate
+     studies.
+ 2.) tpnw_orig_income.csv
+   - Data from the original income coding from Herzog, Baron, and Gibbons.
+   - Note that some variables have been excluded as they are used in separate
+     studies.
+ 3.) tpnw_raw.csv
+   - Data from the Herzog, Baron, and Gibbons experimental survey.
+   - Note that some variables have been excluded as they are used in separate
+     studies.
+
+ # ./output (produced by either ../scripts/hbg.sh or
+ # ../scripts/run_hbg_replication.R):
+ 1.) ./hbg_log.txt
+   - Output for experimental data cleaning and analysis (produced by either
+     ../scripts/hbg.sh or ../scripts/run_hbg_replication.R).
+ 2.) ./run_time
+   - Output for total run time (produced by either ../scripts/hbg.sh or
+     ../scripts/run_hbg_replication.R).
+ 3.) ./fg%.eps
+   - .eps images of figures produced by ../scripts/run_hbg_replication.R;
+     inventoried below.
+ 4.) ./%_tab.tex
+   - .tex files containing LaTeX tables produced by
+     ../scripts/run_hbg_replication.R; inventoried below.
+
+ # ./scripts:
+ 1.) run_hbg_replication.R
+   - "Run file" to run the replication code and produce console and run-time
+     output in R (all systems); produces
+     - ../output/hbg_log.txt
+       - Output for all analyses and results.
+     - ../output/run_time
+       - Output for total run time.
+ 2.) helper_functions.R
+   - R source file containing replication code helper functions.
+ 3.) hbg_cleaning.R
+   - Cleaning script; outputs the cleaned experimental dataset, including
+     anesrake weights.
+     - ../data/tpnw_data.csv
+       - Cleaned experimental data.
+     - ../data/tpnw_aware.csv
+       - Cleaned YouGov data.
+ 4.) hbg_analysis.R
+   - Analysis script; outputs analysis results in graphical, tabular, and RData
+     formats.
+     - ../output/fg1.eps
+       - .eps image of Figure 1.
+     - ../output/%_tab.tex
+       - .tex files containing LaTeX markup of all tables.
+       - balance_tab.tex
+         - Table demonstrating covariate balance across arms.
+       - main_results_tab.tex
+         - Table containing main results.
+       - atts_tab.tex
+         - Table containing attitudinal battery results.
+       - pid_support_tab.tex
+         - Table containing results by partisan identification.
+       - ideo_support_tab.tex
+         - Table containing results by political ideology.
+       - weighted_main_results_tab.tex
+         - Table containing weighted main results.
+     - ../output/hbg_replication_out.RData
+       - .RData file containing all analysis results.
+ 5.) hbg_group_cue.R
+   - Script to produce the group cue graphic.
+     - hbg_fgc1.eps
+       - .eps image of Figure C1.
1/replication_package/data/tpnw_aware_raw.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9151d260624e3a451ce99b19a1e2c482998842153d870f996280fad9489a079a
+ size 222416
1/replication_package/data/tpnw_orig_income.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d17125db74665bbf70590e1d1d8a4f769c6ed86dc6ca7465b99a8ec5dd786bb1
+ size 6934
1/replication_package/data/tpnw_raw.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0830f9d626bebb6e8ab21f8a2a4a5ca49b8276f436ea6464533fcb9fcf09f2f8
+ size 352860
1/replication_package/meta/hbg_codebook.txt ADDED
@@ -0,0 +1,1009 @@
+ ================================================================================
+ RAW EXPERIMENTAL DATA (tpnw_raw.csv)
+ ================================================================================
+
+ --------------------------------------------------------------------------------
+ StartDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date and time that respondent started the survey.
+
+ --------------------------------------------------------------------------------
+ EndDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date and time that respondent finished the survey.
+
+ --------------------------------------------------------------------------------
+ Status
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicator of the type of response collected.
+
+ 0 - IP Address: A normal response
+ 1 - Survey Preview: A preview response
+ 2 - Survey Test: A test response
+ 4 - Imported: An imported response
+ 8 - Spam: A possible spam response
+ 9 - Preview Spam: A possible spam response submitted through the preview link
+ 12 - Imported Spam: A possible spam response that was imported
+ 16 - Offline: A Qualtrics Offline App response
+ 17 - Offline Preview: Previews submitted through the Qualtrics Offline App.
+ This feature is deprecated in the latest versions of the app.
+
+ --------------------------------------------------------------------------------
+ Progress
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicates the progress a respondent made before finishing the survey.
+
+ 100 - Respondent completed the survey or was screened out
+
+ --------------------------------------------------------------------------------
+ Duration..in.seconds.
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Number of seconds it took a respondent to complete the survey.
+
+ --------------------------------------------------------------------------------
+ Finished
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicates whether a respondent finished the survey.
+
+ 1 - Respondent finished the survey or was screened out
+
+ --------------------------------------------------------------------------------
+ RecordedDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date that respondent's survey was recorded in Qualtrics.
+
+ --------------------------------------------------------------------------------
+ ResponseId
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating unique respondent ID.
+
+ --------------------------------------------------------------------------------
+ DistributionChannel
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating method of survey distribution.
+
+ "anonymous" - Survey was distributed without collecting respondent data
+
+ --------------------------------------------------------------------------------
+ UserLanguage
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating respondent's language code.
+
+ "EN" - Respondent took the survey in English
+
+ --------------------------------------------------------------------------------
+ psid
+ --------------------------------------------------------------------------------
+
+ Dynata embedded data field
+
+ Character variable uniquely identifying a respondent and specific project
+ (project-specific ID).
+
+ --------------------------------------------------------------------------------
+ pid
+ --------------------------------------------------------------------------------
+
+ Dynata embedded data field
+
+ Numeric variable uniquely identifying a panelist (panelist ID).
+
+ --------------------------------------------------------------------------------
+ consent
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Indicator of whether a respondent consented to participate in the survey.
+
+ 0 - Respondent did not consent
+ 1 - Respondent consented
+
+ --------------------------------------------------------------------------------
+ new_income_q
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Indicator of whether the new income demographic question format was used.
+
+ "" - Old income question
+ 1 - New income question
+
+ --------------------------------------------------------------------------------
+ confirmation_code
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Numeric code provided to each respondent for response recording after completing
+ the survey.
+
+ --------------------------------------------------------------------------------
+ treatment
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Numeric variable indicating the treatment arm to which a respondent was
+ assigned.
+
+ 0 - Control
+ 1 - Group Cue
+ 2 - Security Cue
+ 3 - Norms Cue
+ 4 - Institutions Cue
+
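+ Note: the analysis code models treatment through the indicator variables
+ group_cue, security_cue, norms_cue, and institutions_cue; the recoding itself
+ is performed in ../scripts/hbg_cleaning.R. A sketch consistent with the coding
+ above (illustrative only, not the package's code):
+
+ tpnw$group_cue        <- as.numeric(tpnw$treatment == 1)
+ tpnw$security_cue     <- as.numeric(tpnw$treatment == 2)
+ tpnw$norms_cue        <- as.numeric(tpnw$treatment == 3)
+ tpnw$institutions_cue <- as.numeric(tpnw$treatment == 4)
+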
+ --------------------------------------------------------------------------------
+ birthyr
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's birth year (numeric entry only).
+
+ --------------------------------------------------------------------------------
+ gender
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported gender.
+
+ 0 - Male
+ 1 - Female
+ 95 - Other
+
+ --------------------------------------------------------------------------------
+ gender_95_TEXT
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported gender (if Other; text entry).
+
+ --------------------------------------------------------------------------------
+ state
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's state of residence (recoded to character strings in cleaned
+ experimental data).
+
+ 1 - Alabama
+ 2 - Alaska
+ 4 - Arizona
+ 5 - Arkansas
+ 6 - California
+ 8 - Colorado
+ 9 - Connecticut
+ 10 - Delaware
+ 11 - District of Columbia
+ 12 - Florida
+ 13 - Georgia
+ 15 - Hawaii
+ 16 - Idaho
+ 17 - Illinois
+ 18 - Indiana
+ 19 - Iowa
+ 20 - Kansas
+ 21 - Kentucky
+ 22 - Louisiana
+ 23 - Maine
+ 24 - Maryland
+ 25 - Massachusetts
+ 26 - Michigan
+ 27 - Minnesota
+ 28 - Mississippi
+ 29 - Missouri
+ 30 - Montana
+ 31 - Nebraska
+ 32 - Nevada
+ 33 - New Hampshire
+ 34 - New Jersey
+ 35 - New Mexico
+ 36 - New York
+ 37 - North Carolina
+ 38 - North Dakota
+ 39 - Ohio
+ 40 - Oklahoma
+ 41 - Oregon
+ 42 - Pennsylvania
+ 44 - Rhode Island
+ 45 - South Carolina
+ 46 - South Dakota
+ 47 - Tennessee
+ 48 - Texas
+ 49 - Utah
+ 50 - Vermont
+ 51 - Virginia
+ 53 - Washington
+ 54 - West Virginia
+ 55 - Wisconsin
+ 56 - Wyoming
+
+ --------------------------------------------------------------------------------
+ income
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported, pre-tax family income.
+
+ 1 - Less than $15,000
+ 2 - $15,000 to $24,999
+ 3 - $25,000 to $49,999
+ 4 - $50,000 to $74,999
+ 5 - $75,000 to $99,999
+ 6 - $100,000 to $149,999
+ 7 - $150,000 to $199,999
+ 8 - $200,000 to $249,999
+ 9 - $250,000 to $499,999
+ 10 - $500,000 to $999,999
+ 11 - More than $1 million
+ 95 - Prefer not to say (recoded to NA in cleaned experimental data)
+
+ Note: income is coalesced with income from tpnw_orig_income.csv, described
+ below.
+
+ --------------------------------------------------------------------------------
+ educ
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported level of education.
+
+ 1 - Did not graduate from high school
+ 2 - High school graduate or equivalent (for example: GED)
+ 3 - Some college, but no degree (yet)
+ 4 - 2-year college degree
+ 5 - 4-year college degree
+ 6 - Postgraduate degree (MA, MBA, MD, JD, PhD, EdD, etc.)
+ 95 - Other (recoded to NA in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ educ_95_TEXT
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported education level (if Other; text entry).
+
+ --------------------------------------------------------------------------------
+ ideo
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported, five-point political ideology.
+
+ -2 - Very liberal
+ -1 - Liberal
+ 0 - Moderate; middle of the road
+ 1 - Conservative
+ 2 - Very conservative
+ 3 - Haven't thought much about this (recoded to NA in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ pid3
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Three-point partisan identification.
+
+ -1 - Democrat
+ 0 - Independent
+ 1 - Republican
+
+ --------------------------------------------------------------------------------
+ pid_forc
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Follow-up question to pid3; displayed only if pid3 was skipped or if the
+ respondent replied "Independent" to pid3 (coalesced with pid3 in cleaned
+ experimental data).
+
+ -1 - Closer to Democratic
+ 0 - Neither
+ 1 - Closer to Republican
+
+ --------------------------------------------------------------------------------
+ join_tpnw
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Respondent's support for joining the TPNW.
+
+ 1 - Yes
+ 2 - No (recoded to 0 in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_danger
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Nuclear weapons are dangerous and present a threat to the world (reverse-coded
+ in cleaned experimental data).
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_peace
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Nuclear weapons contribute to peace by preventing conflict between countries.
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_safe
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Nuclear weapons help to keep my country safe.
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_use_unaccept
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ It is unacceptable to use nuclear weapons in any situation (reverse-coded
+ in cleaned experimental data).
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_always_cheat
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Some countries will always cheat and disobey nuclear treaties (reverse-coded
+ in cleaned experimental data).
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_cannot_elim
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Now that nuclear weapons exist, they can never be eliminated (reverse-coded
+ in cleaned experimental data).
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_slow_reduc
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Reducing the number of nuclear weapons over time is safer than immediate nuclear
+ disarmament.
+
+ 2 - Strongly agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
+ ================================================================================
+ YOUGOV DATA (tpnw_aware.csv)
+ ================================================================================
+
+ --------------------------------------------------------------------------------
+ caseid
+ --------------------------------------------------------------------------------
+
+ YouGov embedded data field
+
+ Numeric variable indicating case ID.
+
+ --------------------------------------------------------------------------------
+ starttime
+ --------------------------------------------------------------------------------
+
+ YouGov embedded data field
+
+ Date that respondent started the survey.
+
+ --------------------------------------------------------------------------------
+ endtime
+ --------------------------------------------------------------------------------
+
+ YouGov embedded data field
+
+ Date that respondent finished the survey.
+
+ --------------------------------------------------------------------------------
+ weight
+ --------------------------------------------------------------------------------
+
+ YouGov weighting variable
+
+ Numeric variable containing post-stratification weights.
+
+ --------------------------------------------------------------------------------
+ birthyr
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's birth year (numeric).
+
+ --------------------------------------------------------------------------------
+ gender
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported gender.
+
+ 1 - Male
+ 2 - Female
+
+ --------------------------------------------------------------------------------
+ race
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported race.
+
+ 1 - White
+ 2 - Black
+ 3 - Hispanic
+ 4 - Asian
+ 5 - Native American
+ 6 - Mixed
+ 7 - Other
+ 8 - Middle Eastern
+
+ --------------------------------------------------------------------------------
+ educ
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported education level.
+
+ 1 - No high school
+ 2 - High school graduate
+ 3 - Some college
+ 4 - 2-year college degree
+ 5 - 4-year college degree
+ 6 - Post-graduate degree
+
+ --------------------------------------------------------------------------------
+ marstat
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported marital status.
+
+ 1 - Married
+ 2 - Separated
+ 3 - Divorced
+ 4 - Widowed
+ 5 - Never married
+ 6 - Domestic / civil partnership
+
+ --------------------------------------------------------------------------------
+ employ
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported employment status.
+
+ 1 - Full-time
+ 2 - Part-time
+ 3 - Temporarily laid off
+ 4 - Unemployed
+ 5 - Retired
+ 6 - Permanently disabled
+ 7 - Homemaker
+ 8 - Student
+ 9 - Other
+
+ --------------------------------------------------------------------------------
+ faminc_new
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported family income.
+
+ 1 - Less than $10,000
+ 2 - $10,000 - $19,999
+ 3 - $20,000 - $29,999
+ 4 - $30,000 - $39,999
+ 5 - $40,000 - $49,999
+ 6 - $50,000 - $59,999
+ 7 - $60,000 - $69,999
+ 8 - $70,000 - $79,999
+ 9 - $80,000 - $99,999
+ 10 - $100,000 - $119,999
+ 11 - $120,000 - $149,999
+ 12 - $150,000 - $199,999
+ 13 - $200,000 - $249,999
+ 14 - $250,000 - $349,999
+ 15 - $350,000 - $499,999
+ 16 - $500,000 or more
+ 97 - Prefer not to say
+
+ --------------------------------------------------------------------------------
+ pid3
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported three-point partisan identification.
+
+ 1 - Democrat
+ 2 - Republican
+ 3 - Independent
+ 4 - Other
+ 5 - Not sure
+
+ --------------------------------------------------------------------------------
+ pid7
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported seven-point partisan identification.
+
+ 1 - Strong Democrat
+ 2 - Not very strong Democrat
+ 3 - Lean Democrat
+ 4 - Independent
+ 5 - Lean Republican
+ 6 - Not very strong Republican
+ 7 - Strong Republican
+ 8 - Not sure
+ 9 - Don't know
+
+ --------------------------------------------------------------------------------
+ presvote2016post
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported 2016 Presidential Election vote choice.
+
+ 1 - Hillary Clinton
+ 2 - Donald Trump
+ 3 - Gary Johnson
+ 4 - Jill Stein
+ 5 - Evan McMullin
+ 6 - Other
+ 7 - Did not vote for President
+
+ --------------------------------------------------------------------------------
+ inputstate
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's state of residence.
+
+ 1 - Alabama
+ 2 - Alaska
+ 4 - Arizona
+ 5 - Arkansas
+ 6 - California
+ 8 - Colorado
+ 9 - Connecticut
+ 10 - Delaware
+ 11 - District of Columbia
+ 12 - Florida
+ 13 - Georgia
+ 15 - Hawaii
+ 16 - Idaho
+ 17 - Illinois
+ 18 - Indiana
+ 19 - Iowa
+ 20 - Kansas
+ 21 - Kentucky
+ 22 - Louisiana
+ 23 - Maine
+ 24 - Maryland
+ 25 - Massachusetts
+ 26 - Michigan
+ 27 - Minnesota
+ 28 - Mississippi
+ 29 - Missouri
+ 30 - Montana
+ 31 - Nebraska
+ 32 - Nevada
+ 33 - New Hampshire
+ 34 - New Jersey
+ 35 - New Mexico
+ 36 - New York
+ 37 - North Carolina
+ 38 - North Dakota
+ 39 - Ohio
+ 40 - Oklahoma
+ 41 - Oregon
+ 42 - Pennsylvania
+ 44 - Rhode Island
+ 45 - South Carolina
+ 46 - South Dakota
+ 47 - Tennessee
+ 48 - Texas
+ 49 - Utah
+ 50 - Vermont
+ 51 - Virginia
+ 53 - Washington
+ 54 - West Virginia
+ 55 - Wisconsin
+ 56 - Wyoming
+
+ --------------------------------------------------------------------------------
+ votereg
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported voter registration status.
+
+ 1 - Yes
+ 2 - No
+ 3 - Don't know
+
+ --------------------------------------------------------------------------------
+ ideo5
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported, five-point political ideology.
+
+ 1 - Very liberal
+ 2 - Liberal
+ 3 - Moderate
+ 4 - Conservative
+ 5 - Very conservative
+ 6 - Not sure
+
+ --------------------------------------------------------------------------------
+ newsint
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's self-reported political interest.
+
+ 1 - Most of the time
+ 2 - Some of the time
+ 3 - Only now and then
+ 4 - Hardly at all
+ 7 - Don't know
+
+ --------------------------------------------------------------------------------
+ religpew
+ --------------------------------------------------------------------------------
+
+ YouGov demographic question
+
+ Respondent's religion (Pew question format).
+
+ 1 - Protestant
+ 2 - Roman Catholic
+ 3 - Mormon
+ 4 - Eastern or Greek Orthodox
+ 5 - Jewish
+ 6 - Muslim
+ 7 - Buddhist
+ 8 - Hindu
+ 9 - Atheist
+ 10 - Agnostic
+ 11 - Nothing in particular
+ 12 - Something else
+
+ --------------------------------------------------------------------------------
+ awareness
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Has the respondent heard of the international treaty to ban nuclear weapons.
+
+ 1 - Yes, and I support it
+ 2 - Yes, and I oppose it
+ 3 - No, but it sounds like I would support it
+ 4 - No, but it sounds like I would oppose it
+ 8 - Skipped
+
+ ================================================================================
+ ORIGINAL INCOME DATA (tpnw_orig_income.csv)
+ ================================================================================
+
+ --------------------------------------------------------------------------------
+ income
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Numeric text-entry variable indicating respondent's self-reported income;
+ converted to categorical variable to match with income from tpnw_raw.csv,
+ described above.
+
+ --------------------------------------------------------------------------------
+ consent
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Indicator of whether a respondent consented to participate in the survey.
+
+ 0 - Respondent did not consent
+ 1 - Respondent consented
+
+ --------------------------------------------------------------------------------
+ pid
+ --------------------------------------------------------------------------------
+
+ Dynata embedded data field
+
+ Numeric variable uniquely identifying a panelist (panelist ID).
+
+ ================================================================================
+ CLEANED EXPERIMENTAL DATA (tpnw_data.csv)
+
+ Only newly instantiated variables are described below; any recodings of
+ variables described above are documented in the replication code cleaning script
+ (hbg_cleaning.R) available in ../scripts.
+ ================================================================================
+
+ --------------------------------------------------------------------------------
+ female
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Indicator of whether respondent self-reported female gender.
+
+ 0 - No
+ 1 - Yes
+ NA - Other/skipped
+
+ --------------------------------------------------------------------------------
+ age
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Numeric variable indicating respondent's age, subtracting self-reported birth
+ year from 2019, the year in which the survey was conducted (2019 - birthyr).
+
+ --------------------------------------------------------------------------------
+ northeast
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Indicator of whether respondent's state is in the Northeast region defined by
+ the U.S. Census Bureau.
+
+ 0 - No
+ 1 - Yes
+ NA - Skipped
+
+ --------------------------------------------------------------------------------
+ midwest
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Indicator of whether respondent's state is in the Midwest region defined by the
+ U.S. Census Bureau.
+
+ 0 - No
+ 1 - Yes
+ NA - Skipped
+
+ --------------------------------------------------------------------------------
+ south
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Indicator of whether respondent's state is in the South region defined by the
+ U.S. Census Bureau.
+
+ 0 - No
+ 1 - Yes
+ NA - Skipped
+
+ --------------------------------------------------------------------------------
+ west
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Indicator of whether respondent's state is in the West region defined by the
+ U.S. Census Bureau.
+
+ 0 - No
+ 1 - Yes
+ NA - Skipped
+
+ --------------------------------------------------------------------------------
+ caseid
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Unique identifier for each respondent for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ --------------------------------------------------------------------------------
+ age_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Coarsened and factorized age variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ age1824 - Respondent is in the 18-24-year-old age group
+ age2534 - Respondent is in the 25-34-year-old age group
+ age3544 - Respondent is in the 35-44-year-old age group
+ age4554 - Respondent is in the 45-54-year-old age group
+ age5564 - Respondent is in the 55-64-year-old age group
+ age6599 - Respondent is in the 65-99-year-old age group
+
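+ Note: a coarsening of this kind can be sketched in R with cut(); the break
+ points below are inferred from the group labels and are illustrative only,
+ not the package's code:
+
+ tpnw$age_wtng <- cut(tpnw$age, breaks = c(17, 24, 34, 44, 54, 64, 99),
+                      labels = c("age1824", "age2534", "age3544",
+                                 "age4554", "age5564", "age6599"))
+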
+ --------------------------------------------------------------------------------
+ female_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Factorized female variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ female - Respondent is female
+ na - Skipped/Other
+ male - Respondent is male
+
+ --------------------------------------------------------------------------------
+ northeast_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Factorized northeast variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ northeast - Respondent is from the Northeast
+ other - Respondent is from another region
+
+ --------------------------------------------------------------------------------
+ midwest_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Factorized midwest variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ midwest - Respondent is from the Midwest
+ other - Respondent is from another region
+
+ --------------------------------------------------------------------------------
+ south_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Factorized south variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ south - Respondent is from the South
+ other - Respondent is from another region
+
+ --------------------------------------------------------------------------------
+ west_wtng
+ --------------------------------------------------------------------------------
+
+ Weighting variable
+
+ Factorized west variable for the purposes of computing raked
+ post-stratification weights with anesrake.
+
+ west - Respondent is from the West
+ other - Respondent is from another region
+
+ --------------------------------------------------------------------------------
+ anesrake_weight
+ --------------------------------------------------------------------------------
+
+ Custom weighting variable
+
+ Raked post-stratification weights computed with anesrake.
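+
+ Note: computing such weights with anesrake follows the pattern sketched below;
+ the target proportions shown are hypothetical placeholders, and the actual
+ targets and call are those set in ../scripts/hbg_cleaning.R:
+
+ library(anesrake)
+ targets <- list(
+   northeast_wtng = c(northeast = 0.17, other = 0.83),  # hypothetical shares
+   south_wtng     = c(south     = 0.38, other = 0.62)   # hypothetical shares
+ )
+ rk <- anesrake(targets, tpnw, caseid = tpnw$caseid, verbose = FALSE)
+ tpnw$anesrake_weight <- rk$weightvec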
1/replication_package/meta/hbg_instrument.pdf ADDED
Binary file (132 kB).
 
1/replication_package/meta/hbg_pap.pdf ADDED
Binary file (264 kB).
 
1/replication_package/scripts/hbg_analysis.R ADDED
@@ -0,0 +1,1033 @@
+ ### Initialize workspace.
+ ## Clear workspace.
+ rm(list = ls(all = TRUE))
+
+ ## Confirm working directory.
+ setwd("~/Downloads/hbg_replication")
+
+ ## Set seed.
+ set.seed(123)
+
+ ## Set number of iterations for bootstrap replication.
+ n_iter <- 10000
+
+ ## Load relevant packages.
+ library(sandwich)
+ library(car)
+
+ ## Load relevant helper functions.
+ source("scripts/helper_functions.R")
+
+ ## Load data.
+ # Load experimental data.
+ tpnw <- read.csv("data/tpnw_data.csv", row.names = 1,
+                  stringsAsFactors = FALSE)
+
+ # Load YouGov data.
+ aware <- read.csv("data/tpnw_aware.csv", row.names = 1,
+                   stringsAsFactors = FALSE)
+
+ ### Define relevant objects.
+ ## Define objects specifying outcomes.
+ # Specify join_tpnw object, representing the main outcome.
+ join_tpnw <- "join_tpnw"
+
+ # Specify tpnw_atts object, representing attitudinal outcomes.
+ tpnw_atts <- names(tpnw)[startsWith(names(tpnw), "tpnw_atts")]
+
+ # Specify all_outs object, concatenating main and attitudinal outcomes.
+ all_outs <- c(join_tpnw, tpnw_atts)
+
+ ## Define objects specifying predictors.
+ # Define object specifying main treatments.
+ treats <- c("group_cue", "security_cue", "norms_cue", "institutions_cue")
+
+ # Define object specifying general demographics.
+ demos <- c("age", "female", "midwest", "west", "south", "income", "educ")
+
+ # Define object specifying politically relevant demographics.
+ pol_demos <- c("ideo", "pid3")
+
+ # Define list of conditioning sets (NULL corresponds to Model 1, whereas the
+ # use of demographic and political covariates corresponds to Model 2).
+ covars <- list(NULL, c(demos, pol_demos))
+
+ ### Produce analysis.
+ ## Produce balance table.
+ # Specify covariates to be used for the balance table.
+ bal_covars <- c("age", "female", "northeast", "midwest", "west",
+                 "south", "income", "educ", "ideo", "pid3")
+
+ # Produce balance table matrix output, looping over treatment group.
+ bal_mat <- lapply(0:4, function (i) {
+   # For each treatment value ...
+   apply(tpnw[bal_covars][tpnw$treatment == i,], 2, function (x) {
+
+     # Calculate the mean of each covariate.
+     mean_x <- mean(x)
+
+     # Calculate SE estimates using n_iter (10,000) bootstrap replicates.
+     sd_x <- sd(replicate(n_iter, {
+       samp <- x[sample(length(x), replace = TRUE)]
+       return(mean(samp))
+     }))
+
+     # Return a list containing both point estimates.
+     return(list(mean = mean_x, sd = sd_x))
+   })
+ })
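+
+ # Sanity check (not part of the original pipeline): for a simple sample mean,
+ # the bootstrap SE above should closely match the analytic SE, sd(x)/sqrt(n);
+ # e.g., for age in the control arm (a sketch, not run):
+ # x <- tpnw$age[tpnw$treatment == 0]
+ # sd(x) / sqrt(length(x))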
+
+ # Bind point estimates for each treatment group.
+ bal_mat <- lapply(bal_mat, function (treat) {
+   do.call("rbind", unlist(treat, recursive = FALSE))
+ })
+
+ # Convert list into a matrix, with columns representing treatment group.
+ bal_mat <- do.call("cbind", bal_mat)
+
+ # Round all estimates to three decimal places and convert to character
+ # for the purposes of producing tabular output.
+ bal_tab <- apply(bal_mat, 2, function (x) format(round(x, 3), digits = 3))
+
+ # Specify rows containing mean point estimates.
+ mean_rows <- endsWith(rownames(bal_tab), ".mean")
+
+ # Specify rows containing SE point estimates.
+ se_rows <- endsWith(rownames(bal_tab), ".sd")
+
+ # Reformat SE estimates to be within parentheses.
+ bal_tab[se_rows,] <- apply(bal_tab[se_rows,], 2, function (x) {
+   paste0("(", x, ")")
+ })
+
+ # Remove row names for rows with SE estimates.
+ rownames(bal_tab)[se_rows] <- ""
+
+ # Remove ".mean" string in row names for rows with mean estimates.
+ rownames(bal_tab)[mean_rows] <- gsub(".mean", "", rownames(bal_tab)[mean_rows])
+
+ # Concatenate data to comport with LaTeX tabular markup.
+ bal_tab <- paste(paste(paste(
+   capwords(rownames(bal_tab)), apply(bal_tab, 1, function (x) {
+     paste(x, collapse = " & ")
+   }),
+   sep = " & "), collapse = " \\\\\n"), "\\\\\n")
+ bal_tab <- gsub("\\( ", "\\(", bal_tab)
+
+ # Produce tabular output.
+ sink("output/balance_tab.tex")
+ cat("\\begin{table}\n",
+     "\\caption{Covariate Balance Across Treatment Arms}\n",
+     "\\centering\\small\n",
+     "\\sisetup{\n",
+     "\tdetect-all,\n",
+     "\ttable-number-alignment = center,\n",
+     "\ttable-figures-integer = 1,\n",
+     "\ttable-figures-decimal = 3,\n",
+     "\tinput-symbols = {()}\n",
+     "}\n",
+     paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{2.75cm}*{5}",
+            "{S[table-number-alignment = center, table-column-width = 1.75cm]}}\n"),
+     "\\toprule\n",
+     "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
+     "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
+     bal_tab,
+     "\\bottomrule\n",
+     "\\end{tabular}\n",
+     "\\end{table}\n")
+ sink()
+
+ ## Produce main results.
+ # Compute main results, looping over conditioning sets.
+ main_results <- lapply(covars, function (covar) {
+   # For each conditioning set ...
+   # Specify the relevant regression formula.
+   form <- as.formula(paste(join_tpnw, paste(c(treats, covar),
+                                             collapse = " + "), sep = " ~ "))
+
+   # Fit the OLS model per the specification.
+   fit <- lm(form, data = tpnw)
+
+   # Compute HC2 robust standard errors.
+   ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
+
+   # Bind coefficient and SE output.
+   reg_out <- cbind(fit$coef[2:5], ses[2:5])
+
+   # Name output matrix columns and rows.
+   colnames(reg_out) <- c("coef", "se")
+   rownames(reg_out) <- treats
+
+   # Return output.
+   return(as.data.frame(reg_out))
+ })
+
+ # Name results to distinguish between Model 1 and Model 2 estimates.
+ names(main_results) <- c("model_1", "model_2")
+
+ ## Assess significance of effect estimates and differences.
+ # Estimate Bonferroni-Holm-adjusted p-values.
+ bf_ps <- lapply(main_results, function (x) {
+   round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
+                  method = "holm"), 3)
+ })
+
+ # Estimate FDR-adjusted p-values, as an added robustness check.
+ fdr_ps <- lapply(main_results, function (x) {
+   round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
+                  method = "fdr"), 3)
+ })
+
+ # Redefine the main model (Model 2), and store the full VCOV matrix.
+ main_model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
+                    institutions_cue + age + female + midwest +
+                    west + south + income + educ + ideo + pid3, tpnw)
+ main_vcov <- vcovHC(main_model, "HC2")
+
+ # Specify diff_sig function for assessing significance between two effect
+ # estimates (defined here for the sake of clarity).
+ diff_sig <- function (eff_1, eff_2) {
+   diff <- main_model$coef[eff_1] - main_model$coef[eff_2]
+   se <- sqrt(main_vcov[eff_1, eff_1] + main_vcov[eff_2, eff_2] -
+                2 * main_vcov[eff_1, eff_2])
+   p <- 2 * (1 - pnorm(abs(diff) / se))
+   return(p)
+ }
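+
+ # Note (not part of the original pipeline): each pairwise comparison below can
+ # also be checked with car::linearHypothesis (car is loaded above), which runs
+ # the analogous Wald test with the same HC2 covariance matrix; shown as a
+ # sketch, not run:
+ # linearHypothesis(main_model, "institutions_cue = security_cue",
+ #                  vcov. = main_vcov)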
196
+
197
+ # Assess the significance of the difference between institution and security cue
198
+ # effect estimates .
199
+ inst_sec_diff_p <- diff_sig("institutions_cue", "security_cue")
200
+
201
+ # Assess the significance of the difference between institution and group cue
202
+ # effect estimates
203
+ inst_grp_diff_p <- diff_sig("institutions_cue", "group_cue")
204
+
205
+ # Assess the significance of the difference between security and group cue
206
+ # effect estimates
207
+ sec_grp_diff_p <- diff_sig("security_cue", "group_cue")
208
+
209
+ # Assess the significance of the difference between security and norms cue
210
+ # effect estimates
211
+ sec_norms_diff_p <- diff_sig("security_cue", "norms_cue")
212
+
213
+ # Assess the significance of the difference between institution and group cue
214
+ # effect estimates
215
+ inst_norms_diff_p <- diff_sig("institutions_cue", "norms_cue")
216
+
217
+ # Assess the significance of the difference between institution and group cue
218
+ # effect estimates
219
+ grp_norms_diff_p <- diff_sig("group_cue", "norms_cue")
220
+
221
+ # The significance of differences between effect estimates was also assessed
222
+ # using 10,000 bootstrap replicates and two-tailed p-values; relevant code is
223
+ # included below with the institutions and security cues, for posterity, but is
224
+ # not run.
225
+
226
+ # Compute SE estimates.
227
+ # diffs <- replicate(10000, {
228
+ # samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
229
+ # model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
230
+ # institutions_cue + age + female + midwest +
231
+ # west + south + income + educ + ideo + pid3, samp)
232
+ # model$coef[5] - model$coef[3]
233
+ # })
234
+ # diffs_se <- sd(diffs)
235
+ #
236
+ # # Fit model.
237
+ # model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
238
+ # institutions_cue + age + female + midwest +
239
+ # west + south + income + educ + ideo + pid3, tpnw)
240
+ #
241
+ # # Compute two-tailed p-value.
242
+ # 2 * (1 - pnorm(abs((model$coef[5] - model$coef[3])/diffs_se)))
243
+
244
+ ## Assess YouGov results.
245
+ # Tabulate responses.
246
+ aware_table <- table(aware$awareness, useNA = "ifany")
247
+ names(aware_table) <- c("Yes, support", "Yes, oppose",
248
+ "No, support", "No, oppose", "Skipped")
249
+
250
+ # Compute both weighted and unweighted means.
251
+ aware_results <- lapply(1:4, function (resp) {
252
+ # Calculate weighted mean.
253
+ wt_mean <- with(aware, weighted.mean(awareness == resp,
254
+ w = weight, na.rm = TRUE))
255
+
256
+ # Calculate raw mean.
257
+ rw_mean <- with(aware, mean(awareness == resp, na.rm = TRUE))
258
+
259
+ # Concatenate means and rename vector.
260
+ means <- c(wt_mean, rw_mean)
261
+ names(means) <- c("weighted_mean", "raw_mean")
262
+
263
+ # Calculate SE estimates with 10,000 bootstrap replicates.
264
+ ses <- replicate(10000, {
265
+ samp <- aware[sample(nrow(aware),
266
+ replace = TRUE),]
267
+ wt_mean <- with(samp, weighted.mean(awareness == resp,
268
+ w = weight, na.rm = TRUE))
269
+ rw_mean <- with(samp, mean(awareness == resp,
270
+ na.rm = TRUE))
271
+ return(c(wt_mean, rw_mean))
272
+ })
273
+ ses <- apply(ses, 1, sd)
274
+ names(ses) <- c("weighted_mean", "raw_mean")
275
+
276
+ # Bind mean and SE estimates.
277
+ outs <- rbind(means, ses)
278
+ rownames(outs) <- paste(names(aware_table)[resp],
279
+ c("mean", "se"), sep = "_")
280
+ return(outs)
281
+ })
282
+
283
+ # Name results to distinguish between responses.
284
+ names(aware_results) <- c("Yes, support", "Yes, oppose",
285
+ "No, support", "No, oppose")
286
+
287
+ ## Assess covariate means for experimental and YouGov data (used in Table A1).
288
+ # Indicate the list of covariates to be assessed.
289
+ demo_tab_vars <- c("age", "female", "northeast", "midwest", "west", "south")
290
+
291
+ # Compute covariate averages for experimental data.
292
+ tpnw_means <- apply(tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
293
+
294
+ # Compute covariate averages for YouGov data.
295
+ aware_means <- apply(aware[demo_tab_vars], 2, function (x) {
296
+ weighted.mean(x, na.rm = TRUE, w = aware$weight)
297
+ })
298
+
299
+ # Compute bootstrap standard errors for demographic means.
300
+ demo_ses <- replicate(10000, {
301
+ # Sample the experimental data.
302
+ samp_tpnw <- tpnw[sample(nrow(tpnw), replace = TRUE), demo_tab_vars]
303
+
304
+ # Sample the YouGov data.
305
+ samp_aware <- aware[sample(nrow(aware), replace = TRUE),
306
+ c(demo_tab_vars, "weight")]
307
+
308
+ # Compute bootstrap means for experimental data.
309
+ tpnw_means <- apply(samp_tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
310
+
311
+ # Compute bootstrap means for YouGov data.
312
+ aware_means <- apply(samp_aware[demo_tab_vars], 2, function (x) {
313
+ weighted.mean(x, na.rm = TRUE, w = samp_aware$weight)
314
+ })
315
+
316
+ # Return the results as a list, and ensure that replicate() also returns a
317
+ # list.
318
+ return(list(tpnw = tpnw_means, aware = aware_means))
319
+ }, simplify = FALSE)
320
+
321
+ # Compute SE estimates for each set of demographics.
322
+ demo_ses <- lapply(c("tpnw", "aware"), function (dataset) {
323
+ # Group all estimates from each dataset.
324
+ sep_res <- lapply(demo_ses, function (iteration) {
325
+ return(iteration[[dataset]])
326
+ })
327
+
328
+ # Bind estimates.
329
+ sep_res <- do.call("rbind", sep_res)
330
+
331
+ # Compute SE estimates.
332
+ sep_ses <- apply(sep_res, 2, sd)
333
+
334
+ # Return SE estimates.
335
+ return(sep_ses)
336
+ })
337
+
338
+ ## Assess responses to the attitudinal battery.
339
+ # Assess responses to the attitudinal battery, looping over treatment group. For
340
+ # each treatment value ...
341
+ att_results <- lapply(0:4, function (i) {
342
+ # Calculate the average response to each attitudinal battery question.
343
+ atts_mean <- apply(tpnw[tpnw$treatment == i, tpnw_atts], 2, function (x) {
344
+ mean(x, na.rm = TRUE)
345
+ })
346
+
347
+ # Calculate SE estimates using 10,000 bootstrap replicates.
348
+ bl_atts_boot <- replicate(10000, {
349
+ dat <- tpnw[tpnw$treatment == i, tpnw_atts]
350
+ samp <- dat[sample(nrow(dat), replace = TRUE),]
351
+ apply(samp, 2, function (x) mean(x, na.rm = TRUE))
352
+ })
353
+ bl_atts_ses <- apply(bl_atts_boot, 1, sd)
354
+
355
+ # Combine mean and SE estimates and return results.
356
+ return(cbind(atts_mean, bl_atts_ses))
357
+ })
358
+
359
+ # Compute treatment effects on responses to the attitudinal battery, looping
360
+ # over conditioning sets.
361
+ att_effs <- lapply(covars, function (covar) {
362
+ # For each conditioning set ...
363
+ model_res <- lapply(tpnw_atts, function (out) {
364
+ # Specify the relevant regression formula.
365
+ form <- as.formula(paste(out,
366
+ paste(c(treats, covar),
367
+ collapse = " + "),
368
+ sep = " ~ "))
369
+
370
+ # Fit the OLS model per the specification.
371
+ fit <- lm(form, data = tpnw)
372
+
373
+ # Compute HC2 robust standard errors.
374
+ ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
375
+
376
+ # Bind coefficient and SE output.
377
+ reg_out <- cbind(fit$coef[2:5], ses[2:5])
378
+
379
+ # Name output matrix columns and rows.
380
+ colnames(reg_out) <- c("coef", "se")
381
+ rownames(reg_out) <- treats
382
+
383
+ # Return output.
384
+ return(as.data.frame(reg_out))
385
+ })
386
+ # Name results to distinguish between each attitudinal battery
387
+ # outcome and return results.
388
+ names(model_res) <- tpnw_atts
389
+ return(model_res)
390
+ })
391
+
392
+ # Name results to distinguish between Model 1 and Model 2 estimates.
393
+ names(att_effs) <- c("model_1", "model_2")
394
+
395
+ ## Perform subgroup analysis.
396
+ # Compute mean support by political party, looping over treatment group.
397
+ pid_results <- lapply(0:4, function (treat) {
398
+ # For each partisan group ...
399
+ out <- lapply(-1:1, function (i) {
400
+ # Calculate average support.
401
+ pid_mean <- with(tpnw,
402
+ mean(join_tpnw[pid3 == i &
403
+ treatment == treat],
404
+ na.rm = TRUE))
405
+
406
+ # Calculate SE estimates with 10,000
407
+ # bootstrap replicates.
408
+ pid_boot <- replicate(10000, {
409
+ dat <- tpnw$join_tpnw[tpnw$pid3 == i &
410
+ tpnw$treatment == treat]
411
+ samp <- dat[sample(length(dat),
412
+ replace = TRUE)]
413
+ mean(samp, na.rm = TRUE)
414
+ })
415
+
416
+ # Concatenate and return mean and SE
417
+ # estimates.
418
+ return(c(mean = pid_mean, se = sd(pid_boot)))
419
+ })
420
+
421
+ # Name results to distinguish estimates by political party,
422
+ # and return output.
423
+ names(out) <- c("dem", "ind", "rep")
424
+ return(as.data.frame(out))
425
+ })
426
+
427
+ # Name results to distinguish between treatment groups.
428
+ names(pid_results) <- c("Control", paste(c("Group", "Security", "Norms",
429
+ "Institutions"), "Cue"))
430
+
431
+ # Assess the significance of differences between control-group means; for
432
+ # 10,000 bootstrap replicates ...
433
+ pid_diff_ses <- replicate(10000, {
434
+ # Sample with replacement.
435
+ samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
436
+
437
+ # Compute the difference between Democrats' and
438
+ # Independents' support.
439
+ dem_ind_diff <- with(samp[samp$treatment == 0,],
440
+ mean(join_tpnw[pid3 == -1],
441
+ na.rm = TRUE) -
442
+ mean(join_tpnw[pid3 == 0],
443
+ na.rm = TRUE))
444
+ # Compute the difference between Democrats' and
445
+ # Republicans' support.
446
+ dem_rep_diff <- with(samp[samp$treatment == 0,],
447
+ mean(join_tpnw[pid3 == -1],
448
+ na.rm = TRUE) -
449
+ mean(join_tpnw[pid3 == 1],
450
+ na.rm = TRUE))
451
+ # Compute the difference between Republicans' and
452
+ # Independents' support.
453
+ ind_rep_diff <- with(samp[samp$treatment == 0,],
454
+ mean(join_tpnw[pid3 == 1],
455
+ na.rm = TRUE) -
456
+ mean(join_tpnw[pid3 == 0],
457
+ na.rm = TRUE))
458
+
459
+ # Concatenate and name results.
460
+ out <- c(dem_ind_diff, dem_rep_diff, ind_rep_diff)
461
+ names(out) <- c("dem_ind", "dem_rep", "ind_rep")
462
+ return(out)
463
+ })
464
+
465
+ # Compute SE estimates for each difference.
466
+ pid_diff_ses <- apply(pid_diff_ses, 1, sd)
467
+
468
+ # Assess significance for each difference.
469
+ dem_ind_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
470
+ pid_results$Control["mean", "ind"]) / pid_diff_ses["dem_ind"]))
471
+ dem_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
472
+ pid_results$Control["mean", "rep"]) / pid_diff_ses["dem_rep"]))
473
+ ind_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "ind"] -
474
+ pid_results$Control["mean", "rep"]) / pid_diff_ses["ind_rep"]))
475
+
476
+ # Compute mean support by political ideology, looping over treatment group.
477
+ tpnw$ideo <- recode(tpnw$ideo, "c(-2, -1) = 'liberal';
478
+ 0 = 'moderate';
479
+ c(1, 2) = 'conservative'")
480
+ ideo_results <- lapply(0:4, function (treat) {
481
+ # For each ideological group ...
482
+ out <- lapply(c("liberal", "moderate", "conservative"), function (i) {
483
+ # Calculate average support.
484
+ ideo_mean <- with(tpnw,
485
+ mean(join_tpnw[ideo == i &
486
+ treatment == treat],
487
+ na.rm = TRUE))
488
+
489
+ # Calculate SE estimates with 10,000
490
+ # bootstrap replicates.
491
+ ideo_boot <- replicate(10000, {
492
+ dat <- tpnw$join_tpnw[tpnw$ideo == i &
493
+ tpnw$treatment == treat]
494
+ samp <- dat[sample(length(dat),
495
+ replace = TRUE)]
496
+ mean(samp, na.rm = TRUE)
497
+ })
498
+
499
+ # Concatenate and return mean and SE
500
+ # estimates.
501
+ return(c(mean = ideo_mean, se = sd(ideo_boot)))
502
+ })
503
+
504
+ # Name results to distinguish estimates by political ideology,
505
+ # and return output.
506
+ names(out) <- c("liberal", "moderate", "conservative")
507
+ return(as.data.frame(out))
508
+ })
509
+
510
+ # Name results to distinguish between treatment groups.
511
+ names(ideo_results) <- c("Control", paste(c("Group", "Security", "Norms",
512
+ "Institutions"), "Cue"))
513
+
514
+ ## Produce weighted main results.
515
+ # Compute weighted main results, looping over conditioning sets.
516
+ w_main_results <- lapply(covars, function (covar) {
517
+ # For each conditioning set ...
518
+ # Specify the relevant regression formula.
519
+ form <- as.formula(paste(join_tpnw, paste(c(treats, covar),
520
+ collapse = " + "), sep = " ~ "))
521
+
522
+ # Fit the OLS model per the specification.
523
+ fit <- lm(form, data = tpnw, weights = anesrake_weight)
524
+
525
+ # Compute HC2 robust standard errors.
526
+ ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
527
+
528
+ # Bind coefficient and SE output.
529
+ reg_out <- cbind(fit$coef[2:5], ses[2:5])
530
+
531
+ # Name output matrix columns and rows.
532
+ colnames(reg_out) <- c("coef", "se")
533
+ rownames(reg_out) <- treats
534
+
535
+ # Return output.
536
+ return(as.data.frame(reg_out))
537
+ })
538
+
539
+ # Name results to distinguish between Model 1 and Model 2 estimates.
540
+ names(w_main_results) <- c("model_1", "model_2")
541
+
542
+ ### Produce plots and tables.
543
+ ## Produce main results plot.
544
+ # Produce main results matrix for plotting.
545
+ main_mat <- do.call("rbind", lapply(1:2, function (model) {
546
+ cbind(main_results[[model]], model)
547
+ }))
548
+
549
+ # Store values for constructing 90- and 95-percent CIs.
550
+ z_90 <- qnorm(.95)
551
+ z_95 <- qnorm(.975)
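+ # (qnorm(.95) is approximately 1.645 and qnorm(.975) approximately 1.96, the
+ # two-sided critical values for 90- and 95-percent intervals.)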
552
+
553
+ # Open new EPS device.
554
+ setEPS()
555
+ postscript("output/fg1.eps", width = 8, height = 5.5)
556
+
557
+ # Define custom graphical parameters.
558
+ par(mar = c(8, 7, 2, 2))
559
+
560
+ # Open new, empty plot.
561
+ plot(0, type = "n", axes = FALSE, ann = FALSE,
562
+ xlim = c(-.3, .05), ylim = c(.8, 4))
563
+
564
+ # Produce guidelines to go behind point estimates and error bars.
565
+ abline(v = seq(-.3, .05, .05)[-7], col = "lightgrey", lty = 3)
566
+
567
+ # Add Model 1 point estimates.
568
+ par(new = TRUE)
569
+ plot(x = main_mat$coef[main_mat$model == 1], y = 1:4 + .05,
570
+ xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "steelblue2",
571
+ xlab = "", ylab = "", axes = FALSE)
572
+
573
+ # Add Model 2 point estimates.
574
+ par(new = TRUE)
575
+ plot(x = main_mat$coef[main_mat$model == 2], y = 1:4 - .05,
576
+ xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "#FF8F37", main = "",
577
+ xlab = "", ylab = "", axes = FALSE)
578
+
579
+ # Add horizontal axis indicating effect estimate size.
580
+ axis(side = 1, at = round(seq(-.3, 0, .05), 2), labels = FALSE)
581
+ mtext(side = 1, at = seq(-.3, 0, .1), text = c("-30", "-20", "-10", "0"),
582
+ cex = .9, line = .75)
583
+ axis(side = 1, at = round(seq(-.25, .05, .05), 2), tck = -.01, labels = FALSE)
584
+
585
+ # Add vertical axis specifying treatment names corresponding to point estimates.
586
+ axis(side = 2, at = 1:4, labels = FALSE)
587
+ mtext(side = 2, line = .75, at = 1:4,
588
+ text = paste(c("Group", "Security", "Norms", "Institutions"), "Cue"),
589
+ las = 1, padj = .35, cex = .9)
590
+
591
+ # Add axis labels.
592
+ mtext(side = 2, line = 2.3, at = 4.2, text = "Treatment",
593
+ font = 2, las = 1, xpd = TRUE)
594
+ mtext(side = 1, text = "Estimated Effect Size", line = 2.5, at = -.15, font = 2)
595
+
596
+ # Add a dashed line at zero.
597
+ abline(v = 0.00, lty = 2)
598
+
599
+ # Add two-sided, 90-percent CIs.
600
+ with(main_mat[main_mat$model == 1,],
601
+ segments(x0 = coef - z_90 * se, y0 = 1:4 + .05, x1 = coef + z_90 * se,
602
+ y1 = 1:4 + .05, col = "steelblue2", lwd = 3))
603
+ with(main_mat[main_mat$model == 2,],
604
+ segments(x0 = coef - z_90 * se, y0 = 1:4 - .05, x1 = coef + z_90 * se,
605
+ y1 = 1:4 - .05, col = "#FF8F37", lwd = 3))
606
+
607
+ # Add two-sided 95-percent CIs.
608
+ with(main_mat[main_mat$model == 1,],
609
+ segments(x0 = coef - z_95 * se, y0 = 1:4 + .05, x1 = coef + z_95 * se,
610
+ y1 = 1:4 + .05, col = "steelblue2", lwd = 1))
611
+ with(main_mat[main_mat$model == 2,],
612
+ segments(x0 = coef - z_95 * se, y0 = 1:4 - .05, x1 = coef + z_95 * se,
613
+ y1 = 1:4 - .05, col = "#FF8F37", lwd = 1))
614
+
615
+ # Add legend.
616
+ legend(legend = paste("Model", 1:2), x = -.15, y = -.275, horiz = TRUE,
617
+ pch = 16, col = c("steelblue2", "#FF8F37"), xjust = .5, xpd = TRUE,
618
+ text.width = .05, cex = .9)
619
+
620
+ # Draw a box around the plot.
621
+ box()
622
+
623
+ # Close the graphical device.
624
+ dev.off()
625
+
626
+ ## Create tabular output for main results.
627
+ # Define matrix object of main results.
628
+ tab_dat <- do.call("cbind", main_results)
629
+
630
+ # Compute control-group means, with SE estimates; define OLS formula.
631
+ ctrl_form <- as.formula(paste(join_tpnw, paste(treats,
632
+ collapse = " + "), sep = " ~ "))
633
+
634
+ # Fit the OLS model per the specification.
635
+ ctrl_fit <- lm(ctrl_form, data = tpnw)
636
+
637
+ # Recover the control-group mean.
638
+ ctrl_mean <- ctrl_fit$coef["(Intercept)"]
639
+
640
+ # Compute control SE.
641
+ ctrl_se <- sqrt(diag(vcovHC(ctrl_fit, "HC2")))["(Intercept)"]
642
+
643
+ # Concatenate mean and SE output with blank values for Model 2.
644
+ ctrl_results <- c(format(round(c(ctrl_mean, ctrl_se), 3) * 100, digits = 2),
645
+ "|", "|")
646
+
647
+ # Rescale estimates to percentage points and format.
648
+ tab_dat <- apply(tab_dat, 2, function (y) format(round(y, 3) * 100, digits = 2))
649
+
650
+ # Bind control-group means with main results data.
651
+ tab <- rbind(ctrl_results, tab_dat)
652
+
653
+ # Rename row containing control-group means.
654
+ rownames(tab)[which(rownames(tab) == "1")] <- "control_mean"
655
+
656
+ # Identify coefficient columns.
657
+ coef_cols <- grep("coef$", colnames(tab))
658
+
659
+ # Identify SE columns.
660
+ se_cols <- grep("se$", colnames(tab))
661
+
662
+ # Reformat SE estimates to be within parentheses.
663
+ tab[,se_cols] <- apply(tab[, se_cols], 2, function (y) paste0("(", y, ")"))
664
+
665
+ # Concatenate data to comport with LaTeX tabular markup.
666
+ tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
667
+ apply(tab, 1, function (x) {
668
+ paste(x, collapse = " & ")
669
+ }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
670
+
671
+ # Produce tabular output.
672
+ sink("output/main_results_tab.tex")
673
+ cat("\\begin{table}\n",
674
+ "\\caption{Estimated Treatment Effects on Support for TPNW}\n",
675
+ "\\begin{adjustbox}{width = \\textwidth, center}\n",
676
+ "\\sisetup{\n",
677
+ "\tdetect-all,\n",
678
+ "\ttable-number-alignment = center,\n",
679
+ "\ttable-figures-integer = 1,\n",
680
+ "\ttable-figures-decimal = 3,\n",
681
+ "\ttable-space-text-post = *,\n",
682
+ "\tinput-symbols = {()}\n",
683
+ "}\n",
684
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
685
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
686
+ "\\toprule\n",
687
+ "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
688
+ "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
689
+ tab,
690
+ "\\bottomrule\n",
691
+ "\\end{tabular}\n",
692
+ "\\end{adjustbox}\n",
693
+ "\\end{table}\n")
694
+ sink()
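+ # (Each row of `tab` is emitted as LaTeX markup of the form
+ # "<label> & <coef> & (<se>) & <coef> & (<se>) \\", where the bracketed
+ # terms are placeholders; the siunitx input-symbols option above lets the
+ # parenthesized SEs sit in S columns.)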
695
+
696
+ ## Create tabular output for YouGov results.
697
+ # Restructure data as a matrix.
698
+ aware_tab <- do.call("rbind", aware_results)
699
+
700
+ # Rescale estimates to percentage points and format.
701
+ aware_tab <- apply(aware_tab, 2, function (y) format(round(y, 3) * 100,
702
+ digits = 3))
703
+
704
+ # Identify mean rows.
705
+ mean_rows <- endsWith(rownames(aware_tab), "mean")
706
+
707
+ # Identify SE rows.
708
+ se_rows <- endsWith(rownames(aware_tab), "se")
709
+
710
+ # Reformat SE estimates to be within parentheses.
711
+ aware_tab[se_rows,] <- paste0("(", aware_tab[se_rows,], ")")
712
+
713
+ # Remove row names for rows with SE estimates.
714
+ rownames(aware_tab)[se_rows] <- ""
715
+
716
+ # Remove "_mean" indication in mean_rows.
717
+ rownames(aware_tab)[mean_rows] <- gsub("_mean", "",
718
+ rownames(aware_tab)[mean_rows])
719
+
720
+ # Add a placeholder row for skipped responses, whose shares are not
721
+ # computed, and rename the relevant row.
722
+ aware_tab <- rbind(aware_tab, c("|", "|"))
723
+ rownames(aware_tab)[nrow(aware_tab)] <- "Skipped"
724
+
725
+ # Add an empty column to the table, and insert the count column at the relevant
726
+ # indices.
728
+ aware_tab <- cbind(aware_tab, "")
729
+ colnames(aware_tab)[ncol(aware_tab)] <- "N"
730
+ aware_tab[which(rownames(aware_tab) %in% names(aware_table)), "N"] <- aware_table
731
+
732
+ # Concatenate data to comport with LaTeX tabular markup.
733
+ aware_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(aware_tab))),
734
+ apply(aware_tab, 1, function (x) {
735
+ paste(x, collapse = " & ")
736
+ }),
737
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
738
+
739
+ # Produce tabular output.
740
+ sink("output/yougov_tab.tex")
741
+ cat("\\begin{table}\n",
742
+ "\\caption{YouGov Survey Responses}\n",
743
+ "\\centering\\small\n",
744
+ "\\sisetup{\n",
745
+ "\tdetect-all,\n",
746
+ "\ttable-number-alignment = center,\n",
747
+ "\ttable-figures-integer = 1,\n",
748
+ "\ttable-figures-decimal = 3,\n",
749
+ "\tinput-symbols = {()}\n",
750
+ "}\n",
751
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
752
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
753
+ "\\toprule\n",
754
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
755
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
756
+ aware_tab,
757
+ "\\bottomrule\n",
758
+ "\\end{tabular}\n",
759
+ "\\end{table}\n")
760
+ sink()
761
+
762
+ ## Create tabular output for attitudinal results.
763
+ # Define matrix object of attitudinal results.
764
+ tab_dat <- do.call("cbind", att_results)
765
+
766
+ # Reformat matrix to alternate mean and SE estimates.
767
+ tab <- sapply(seq(0, 8, 2), function (i) {
768
+ matrix(c(t(tab_dat[,1:2 + i])), 14, 1)
769
+ })
770
+
771
+ # Reformat data to include three decimal points.
772
+ tab <- apply(tab, 2, function (y) format(round(y, 3), digits = 3))
773
+
774
+ # Rename rows to indicate mean and SE estimates.
775
+ rownames(tab) <- paste(rep(rownames(tab_dat), each = 2),
776
+ c("mean", "se"), sep = "_")
777
+
778
+ # Identify mean rows.
779
+ mean_rows <- grep("_mean", rownames(tab))
780
+
781
+ # Identify SE rows.
782
+ se_rows <- grep("_se", rownames(tab))
783
+
784
+ # Reformat SE estimates to be within parentheses.
785
+ tab[se_rows,] <- apply(tab[se_rows,], 1, function (y) {
786
+ paste0("(", gsub(" ", "", y), ")")
787
+ })
788
+
789
+ # Rename rows to improve tabular labels; remove "tpnw_atts," "mean," and "se" row
790
+ # name strings.
791
+ rownames(tab) <- gsub("tpnw_atts|mean$|se$", "", rownames(tab))
792
+
793
+ # Remove leading and trailing underscores.
794
+ rownames(tab) <- gsub("^_|_$", "", rownames(tab))
795
+
796
+ # Remove row names for rows with SE estimates.
797
+ rownames(tab)[se_rows] <- ""
798
+
799
+ # Concatenate data to comport with LaTeX tabular markup.
800
+ tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
801
+ apply(tab, 1, function (x) {
802
+ paste(x, collapse = " & ")
803
+ }),
804
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
805
+
806
+ # Produce tabular output.
807
+ sink("output/atts_tab.tex")
808
+ cat("\\begin{table}\n",
809
+ "\\caption{Attitudes Toward Nuclear Weapons by Arm}\n",
810
+ "\\centering\\small\n",
811
+ "\\sisetup{\n",
812
+ "\tdetect-all,\n",
813
+ "\ttable-number-alignment = center,\n",
814
+ "\ttable-figures-integer = 1,\n",
815
+ "\ttable-figures-decimal = 3,\n",
816
+ "\ttable-space-text-post = *,\n",
817
+ "\tinput-symbols = {()}\n",
818
+ "}\n",
819
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
820
+ "{S[table-number-alignment = center, table-column-width=1.25cm]}}\n"),
821
+ "\\toprule\n",
822
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
823
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
824
+ tab,
825
+ "\\bottomrule\n",
826
+ "\\end{tabular}\n",
827
+ "\\end{table}\n")
828
+ sink()
829
+
830
+ ## Create tabular output for results by political party.
831
+ # Restructure data such that mean and SE estimates are alternating rows in a
832
+ # 6 x 1 matrix, in each of five list elements, corresponding to each treatment
833
+ # group; and bind the results for each treatment group.
834
+ pid_tab <- lapply(pid_results, function (x) {
835
+ matrix(unlist(x), nrow = 6, ncol = 1)
836
+ })
837
+ pid_tab <- do.call("cbind", pid_tab)
838
+
839
+ # Assign row names to distinguish results for each partisan group, and mean and
840
+ # SE estimates.
841
+ rownames(pid_tab) <- paste(rep(c("democrat", "independent", "republican"),
842
+ each = 2), c("mean", "se"))
843
+
844
+ # Identify mean rows.
845
+ mean_rows <- endsWith(rownames(pid_tab), "mean")
846
+
847
+ # Identify SE rows.
848
+ se_rows <- endsWith(rownames(pid_tab), "se")
849
+
850
+ # Label columns per treatment, for the computation of ATEs.
851
+ colnames(pid_tab) <- c("control", treats)
852
+
853
+ # Compute ATEs, with control as baseline, and update tabular data.
854
+ pid_tab[mean_rows, treats] <- pid_tab[mean_rows, treats] -
855
+ pid_tab[mean_rows, "control"]
856
+
857
+ # Rescale estimates to percentage points and format.
858
+ pid_tab <- apply(pid_tab, 2, function (y) format(round(y, 3) * 100, digits = 3))
859
+
860
+ # Remove extraneous spacing.
861
+ pid_tab <- gsub(" ", "", pid_tab)
862
+
863
+ # Reformat SE estimates to be within parentheses.
864
+ pid_tab[se_rows,] <- paste0("(", pid_tab[se_rows,], ")")
865
+
866
+ # Remove row names for rows with SE estimates.
867
+ rownames(pid_tab)[se_rows] <- ""
868
+
869
+ # Concatenate data to comport with LaTeX tabular markup.
870
+ pid_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(pid_tab))),
871
+ apply(pid_tab, 1, function (x) {
872
+ paste(x, collapse = " & ")
873
+ }),
874
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
875
+
876
+ # Produce tabular output.
877
+ sink("output/pid_support.tex")
878
+ cat("\\begin{table}\n",
879
+ "\\caption{Support for Joining TPNW by Party ID}\n",
880
+ "\\centering\\small\n",
881
+ "\\sisetup{\n",
882
+ "\tdetect-all,\n",
883
+ "\ttable-number-alignment = center,\n",
884
+ "\ttable-figures-integer = 1,\n",
885
+ "\ttable-figures-decimal = 3,\n",
886
+ "\tinput-symbols = {()}\n",
887
+ "}\n",
888
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
889
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
890
+ "\\toprule\n",
891
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
892
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
893
+ pid_tab,
894
+ "\\bottomrule\n",
895
+ "\\end{tabular}\n",
896
+ "\\end{table}\n")
897
+ sink()
898
+
899
+ ## Create tabular output for results by political ideology.
900
+ # Restructure data such that mean and SE estimates are alternating rows in a
901
+ # 6 x 1 matrix, in each of five list elements, corresponding to each treatment
902
+ # group; and bind the results for each treatment group.
903
+ ideo_tab <- lapply(ideo_results, function (x) {
904
+ matrix(unlist(x), nrow = 6, ncol = 1)
905
+ })
906
+ ideo_tab <- do.call("cbind", ideo_tab)
907
+
908
+ # Assign row names to distinguish results for each ideological group, and mean
909
+ # and SE estimates.
910
+ rownames(ideo_tab) <- paste(rep(c("liberal", "moderate", "conservative"),
911
+ each = 2), c("mean", "se"))
912
+
913
+ # Rescale estimates to percentage points and format.
914
+ ideo_tab <- apply(ideo_tab, 2, function (y) format(round(y, 3) * 100,
915
+ digits = 3))
916
+
917
+ # Identify mean rows.
918
+ mean_rows <- endsWith(rownames(ideo_tab), "mean")
919
+
920
+ # Identify SE rows.
921
+ se_rows <- endsWith(rownames(ideo_tab), "se")
922
+
923
+ # Reformat SE estimates to be within parentheses.
924
+ ideo_tab[se_rows,] <- paste0("(", ideo_tab[se_rows,], ")")
925
+
926
+ # Remove row names for rows with SE estimates.
927
+ rownames(ideo_tab)[se_rows] <- ""
928
+
929
+ # Concatenate data to comport with LaTeX tabular markup.
930
+ ideo_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(ideo_tab))),
931
+ apply(ideo_tab, 1, function (x) {
932
+ paste(x, collapse = " & ")
933
+ }),
934
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
935
+
936
+ # Produce tabular output.
937
+ sink("output/ideo_support_tab.tex")
938
+ cat("\\begin{table}\n",
939
+ "\\caption{Support for Joining TPNW by Ideology}\n",
940
+ "\\centering\\small\n",
941
+ "\\sisetup{\n",
942
+ "\tdetect-all,\n",
943
+ "\ttable-number-alignment = center,\n",
944
+ "\ttable-figures-integer = 1,\n",
945
+ "\ttable-figures-decimal = 3,\n",
946
+ "\tinput-symbols = {()}\n",
947
+ "}\n",
948
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
949
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
950
+ "\\toprule\n",
951
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
952
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
953
+ ideo_tab,
954
+ "\\bottomrule\n",
955
+ "\\end{tabular}\n",
956
+ "\\end{table}\n")
957
+ sink()
958
+
959
+ ## Create tabular output for weighted main results.
960
+ # Define matrix object of weighted main results.
961
+ w_tab_dat <- do.call("cbind", w_main_results)
962
+
963
+ # Compute weighted control-group means, with SE estimates; define OLS formula.
964
+ w_ctrl_form <- as.formula(paste(join_tpnw, paste(treats,
965
+ collapse = " + "), sep = " ~ "))
966
+
967
+ # Fit the OLS model per the specification.
968
+ w_ctrl_fit <- lm(w_ctrl_form, data = tpnw,
969
+ weights = anesrake_weight)
970
+
971
+ # Recover the control-group mean.
972
+ w_ctrl_mean <- w_ctrl_fit$coef["(Intercept)"]
973
+
974
+ # Compute control SE.
975
+ w_ctrl_se <- sqrt(diag(vcovHC(w_ctrl_fit, "HC2")))["(Intercept)"]
976
+
978
+ # Concatenate mean and SE output with blank values for Model 2.
979
+ w_ctrl_results <- c(format(round(c(w_ctrl_mean, w_ctrl_se), 3) * 100,
980
+ digits = 2), "|", "|")
981
+
982
+ # Rescale estimates to percentage points and format.
983
+ w_tab_dat <- apply(w_tab_dat, 2, function (y) format(round(y, 3) * 100,
984
+ digits = 2))
985
+
986
+ # Bind control-group means with main results data.
987
+ w_tab <- rbind(w_ctrl_results, w_tab_dat)
988
+
989
+ # Rename row containing control-group means.
990
+ rownames(w_tab)[which(rownames(w_tab) == "1")] <- "control_mean"
991
+
992
+ # Identify coefficient columns.
993
+ coef_cols <- grep("coef$", colnames(w_tab))
994
+
995
+ # Identify SE columns.
996
+ se_cols <- grep("se$", colnames(w_tab))
997
+
998
+ # Reformat SE estimates to be within parentheses.
999
+ w_tab[,se_cols] <- apply(w_tab[, se_cols], 2, function (y) paste0("(", y, ")"))
1000
+
1001
+ # Concatenate data to comport with LaTeX tabular markup.
1002
+ w_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(w_tab))),
1003
+ apply(w_tab, 1, function (x) {
1004
+ paste(x, collapse = " & ")
1005
+ }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
1006
+
1007
+ # Produce tabular output.
1008
+ sink("output/weighted_main_results_tab.tex")
1009
+ cat("\\begin{table}\n",
1010
+ "\\caption{Estimated Treatment Effects on Support for TPNW (Weighted)}\n",
1011
+ "\\begin{adjustbox}{width = \\textwidth, center}\n",
1012
+ "\\sisetup{\n",
1013
+ "\tdetect-all,\n",
1014
+ "\ttable-number-alignment = center,\n",
1015
+ "\ttable-figures-integer = 1,\n",
1016
+ "\ttable-figures-decimal = 3,\n",
1017
+ "\ttable-space-text-post = *,\n",
1018
+ "\tinput-symbols = {()}\n",
1019
+ "}\n",
1020
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
1021
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
1022
+ "\\toprule\n",
1023
+ "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
1024
+ "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
1025
+ w_tab,
1026
+ "\\bottomrule\n",
1027
+ "\\end{tabular}\n",
1028
+ "\\end{adjustbox}\n",
1029
+ "\\end{table}\n")
1030
+ sink()
1031
+
1032
+ ### Save image containing all objects.
1033
+ save.image(file = "output/hbg_replication_out.RData")
1/replication_package/scripts/hbg_cleaning.R ADDED
@@ -0,0 +1,406 @@
1
+ ### Initialize workspace.
2
+ rm(list = ls(all = TRUE))
3
+ setwd("~/Downloads/hbg_replication")
4
+
5
+ # Load required packages
6
+ library(plyr)
7
+ library(car)
8
+ library(anesrake)
9
+
10
+ # Load relevant functions.
11
+ source("scripts/helper_functions.R")
12
+
13
+ ## Load data.
14
+ # Load TPNW experimental data.
15
+ tpnw <- read.csv("data/tpnw_raw.csv", stringsAsFactors = FALSE, row.names = 1)
16
+
17
+ # Load original income question data.
18
+ orig_inc <- read.csv("data/tpnw_orig_income.csv", stringsAsFactors = FALSE,
19
+ row.names = 1)
20
+
21
+ # Load YouGov data (including covariates and awareness question).
22
+ aware <- read.csv("data/tpnw_aware_raw.csv", stringsAsFactors = FALSE,
23
+ row.names = 1)
24
+
25
+ ### Clean TPNW data.
26
+ ## Clean data.
27
+ # Remove first two (extraneous) rows.
28
+ tpnw <- tpnw[-c(1, 2),]
29
+ orig_inc <- orig_inc[-c(1, 2),]
30
+
31
+ # Remove respondents who did not consent.
32
+ tpnw <- tpnw[tpnw$consent == "1",]
33
+ orig_inc <- orig_inc[orig_inc$consent == "1",]
34
+
35
+ # Coalesce income variables.
36
+ orig_inc <- within(orig_inc, {
37
+ income <- as.numeric(income)
38
+ income <- ifelse(income < 1000, NA, income)
39
+ income <- ifelse(income < 15000, 1, income)
40
+ income <- ifelse(income >= 15000 & income < 25000, 2, income)
41
+ income <- ifelse(income >= 25000 & income < 50000, 3, income)
42
+ income <- ifelse(income >= 50000 & income < 75000, 4, income)
43
+ income <- ifelse(income >= 75000 & income < 100000, 5, income)
44
+ income <- ifelse(income >= 100000 & income < 150000, 6, income)
45
+ income <- ifelse(income >= 150000 & income < 200000, 7, income)
46
+ income <- ifelse(income >= 200000 & income < 250000, 8, income)
47
+ income <- ifelse(income >= 250000 & income < 500000, 9, income)
48
+ income <- ifelse(income >= 500000 & income < 1000000, 10, income)
49
+ income <- ifelse(income >= 1000000, 11, income)
50
+ })
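+ # (For example, a raw dollar response of 60000 falls in the 50,000-74,999
+ # bracket and is coded 4.)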
51
+ orig_inc <- data.frame(pid = orig_inc$pid, income_old = orig_inc$income)
52
+ tpnw <- plyr::join(tpnw, orig_inc, by = "pid", type = "left")
53
+ tpnw <- within(tpnw, {
54
+ income <- coalesce(as.numeric(income), as.numeric(income_old))
55
+ })
56
+
57
+ # Note meta variables.
58
+ meta <- c("consent", "confirmation_code", "new_income_q")
59
+
60
+ # Note Qualtrics variables.
61
+ qualtrics_vars <- c("StartDate", "EndDate", "Status", "Progress",
62
+ "Duration..in.seconds.", "Finished", "RecordedDate",
63
+ "DistributionChannel", "UserLanguage")
64
+
65
+ # Note Dynata variables.
66
+ dynata_vars <- c("pid", "psid")
67
+
68
+ # Note non-numeric variables.
69
+ char_vars <- c(qualtrics_vars, dynata_vars,
70
+ c("ResponseId"), names(tpnw)[grep("text", tolower(names(tpnw)))])
71
+ char_cols <- which(names(tpnw) %in% char_vars)
72
+
73
+ # Numericize other variables
74
+ tpnw <- data.frame(apply(tpnw[, -char_cols], 2, as.numeric), tpnw[char_cols])
75
+
76
+ tpnw_atts <- which(names(tpnw) %in% c("danger", "peace", "safe", "use_unaccept",
77
+ "always_cheat", "cannot_elim", "slow_reduc"))
78
+ names(tpnw)[tpnw_atts] <- paste("tpnw_atts", names(tpnw)[tpnw_atts], sep = "_")
79
+
80
+ # Coalesce relevant variables.
81
+ tpnw <- within(tpnw, {
82
+ # Clean gender variable.
83
+ female <- ifelse(gender == 95, NA, gender)
84
+
85
+ # Transform birthyr variable to age.
86
+ age <- 2019 - birthyr
87
+
88
+ # Transform income variable.
89
+ income <- car::recode(income, "95 = NA")
90
+
91
+ # Combine pid and pid_forc variables.
92
+ pid3 <- ifelse(pid3 == 0, pid_forc, pid3)
93
+
94
+ # Recode ideology variable.
95
+ ideo <- car::recode(ideo, "3 = NA")
96
+
97
+ # Recode education variable.
98
+ educ <- car::recode(educ, "95 = NA")
99
+
100
+ # Recode state variable.
101
+ state <- recode(state, "1 = 'Alabama';
102
+ 2 = 'Alaska';
103
+ 4 = 'Arizona';
104
+ 5 = 'Arkansas';
105
+ 6 = 'California';
106
+ 8 = 'Colorado';
107
+ 9 = 'Connecticut';
108
+ 10 = 'Delaware';
109
+ 11 = 'Washington DC';
110
+ 12 = 'Florida';
111
+ 13 = 'Georgia';
112
+ 15 = 'Hawaii';
113
+ 16 = 'Idaho';
114
+ 17 = 'Illinois';
115
+ 18 = 'Indiana';
116
+ 19 = 'Iowa';
117
+ 20 = 'Kansas';
118
+ 21 = 'Kentucky';
119
+ 22 = 'Louisiana';
120
+ 23 = 'Maine';
121
+ 24 = 'Maryland';
122
+ 25 = 'Massachusetts';
123
+ 26 = 'Michigan';
124
+ 27 = 'Minnesota';
125
+ 28 = 'Mississippi';
126
+ 29 = 'Missouri';
127
+ 30 = 'Montana';
128
+ 31 = 'Nebraska';
129
+ 32 = 'Nevada';
130
+ 33 = 'New Hampshire';
131
+ 34 = 'New Jersey';
132
+ 35 = 'New Mexico';
133
+ 36 = 'New York';
134
+ 37 = 'North Carolina';
135
+ 38 = 'North Dakota';
136
+ 39 = 'Ohio';
137
+ 40 = 'Oklahoma';
138
+ 41 = 'Oregon';
139
+ 42 = 'Pennsylvania';
140
+ 44 = 'Rhode Island';
141
+ 45 = 'South Carolina';
142
+ 46 = 'South Dakota';
143
+ 47 = 'Tennessee';
144
+ 48 = 'Texas';
145
+ 49 = 'Utah';
146
+ 50 = 'Vermont';
147
+ 51 = 'Virginia';
148
+ 53 = 'Washington';
149
+ 54 = 'West Virginia';
150
+ 55 = 'Wisconsin';
151
+ 56 = 'Wyoming'")
152
+
153
+ # Create regional indicators.
154
+ northeast <- state %in% c("Connecticut", "Maine", "Massachusetts",
155
+ "New Hampshire", "Rhode Island", "Vermont",
156
+ "New Jersey", "New York", "Pennsylvania")
157
+ midwest <- state %in% c("Illinois", "Indiana", "Michigan", "Ohio",
158
+ "Wisconsin", "Iowa", "Kansas", "Minnesota",
159
+ "Missouri", "Nebraska", "North Dakota",
160
+ "South Dakota")
161
+ south <- state %in% c("Delaware", "Florida", "Georgia", "Maryland",
162
+ "North Carolina", "South Carolina", "Virginia",
163
+ "Washington DC", "West Virginia", "Alabama",
164
+ "Kentucky", "Mississippi", "Tennessee", "Arkansas",
165
+ "Louisiana", "Oklahoma", "Texas")
166
+ west <- state %in% c("Arizona", "Colorado", "Idaho", "Montana", "Nevada",
167
+ "New Mexico", "Utah", "Wyoming", "Alaska",
168
+ "California", "Hawaii", "Oregon", "Washington")
169
+
170
+ # Recode join_tpnw outcome.
171
+ join_tpnw <- car::recode(join_tpnw, "2 = 0")
172
+
173
+ # Create indicator variables for each treatment arm.
174
+ control <- treatment == 0
175
+ group_cue <- treatment == 1
176
+ security_cue <- treatment == 2
177
+ norms_cue <- treatment == 3
178
+ institutions_cue <- treatment == 4
179
+
180
+ # Recode attitudinal outcomes.
181
+ tpnw_atts_danger <- recode(tpnw_atts_danger, "-2 = 2; -1 = 1; 1 = -1; 2 = -2")
182
+ tpnw_atts_use_unaccept <- recode(tpnw_atts_use_unaccept, "-2 = 2; -1 = 1;
183
+ 1 = -1; 2 = -2")
184
+ tpnw_atts_always_cheat <- recode(tpnw_atts_always_cheat, "-2 = 2; -1 = 1;
185
+ 1 = -1; 2 = -2")
186
+ tpnw_atts_cannot_elim <- recode(tpnw_atts_cannot_elim, "-2 = 2; -1 = 1;
187
+ 1 = -1; 2 = -2")
188
+ })
189
+
190
+ # Use mean imputation for missingness.
191
+ # Redefine char_cols object.
192
+ char_cols <- which(names(tpnw) %in% c(char_vars, meta, "state", "pid_forc",
193
+ "income_old", "gender"))
194
+
195
+ # Define out_vars object.
196
+ out_vars <- which(names(tpnw) %in% c("join_tpnw", "n_nukes", "n_tests") |
197
+ startsWith(names(tpnw), "tpnw_atts") |
198
+ startsWith(names(tpnw), "physical_eff") |
199
+ startsWith(names(tpnw), "testing_matrix"))
200
+
201
+ # Mean impute.
202
+ tpnw[,-c(char_cols, out_vars)] <-
203
+ data.frame(apply(tpnw[, -c(char_cols, out_vars)], 2, function (x) {
204
+ replace(x, is.na(x), mean(x, na.rm = TRUE))
205
+ }))
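+ # (Mean imputation replaces each NA with its column mean; e.g.,
+ # replace(c(1, NA, 3), is.na(c(1, NA, 3)), mean(c(1, NA, 3), na.rm = TRUE))
+ # returns c(1, 2, 3).)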
206
+
207
+ ### Clean YouGov data.
208
+ ## Indicate all non-numeric variables.
209
+ # Indicate YouGov metadata variables (e.g., start/end time, respondent ID) that
210
+ # may contain characters.
211
+ yougov_vars <- c("starttime", "endtime")
212
+
213
+ # Convert all non-metadata variables to numeric.
214
+ aware <- data.frame(apply(aware[, -which(names(aware) %in% yougov_vars)], 2,
215
+ as.numeric), aware[which(names(aware) %in% yougov_vars)])
216
+
217
+ # Coalesce relevant variables.
218
+ aware <- within(aware, {
219
+ # Clean gender variable to an indicator of female gender (renamed below).
220
+ gender <- recode(gender, "8 = NA") - 1
221
+
222
+ # Transform birthyr variable to age (renamed below).
223
+ birthyr <- 2020 - birthyr
224
+
225
+ # Recode pid3 variable.
226
+ pid3 <- recode(pid3, "1 = -1; 2 = 1; 3 = 0; c(5, 8, 9) = NA")
227
+
228
+ # Recode pid7
229
+ pid7 <- recode(pid7, "1 = -3; 2 = -2; 3 = -1; 4 = 0; 5 = 1; 6 = 2; 7 = 3;
230
+ c(8, 98) = NA")
231
+
232
+ # Code pid variable from pid7.
233
+ party <- recode(pid7, "c(-3, -2, -1) = -1; c(1, 2, 3) = 1")
234
+
235
+ # Recode ideology variable.
236
+ ideo5 <- recode(ideo5, "c(6, 8, 9) = NA") - 3
237
+
238
+ # Recode education variable.
239
+ educ <- recode(educ, "c(8, 9) = NA")
240
+
241
+ # Recode state variable.
242
+ state <- recode(inputstate, "1 = 'Alabama';
243
+ 2 = 'Alaska';
244
+ 4 = 'Arizona';
245
+ 5 = 'Arkansas';
246
+ 6 = 'California';
247
+ 8 = 'Colorado';
248
+ 9 = 'Connecticut';
249
+ 10 = 'Delaware';
250
+ 11 = 'Washington DC';
251
+ 12 = 'Florida';
252
+ 13 = 'Georgia';
253
+ 15 = 'Hawaii';
254
+ 16 = 'Idaho';
255
+ 17 = 'Illinois';
256
+ 18 = 'Indiana';
257
+ 19 = 'Iowa';
258
+ 20 = 'Kansas';
259
+ 21 = 'Kentucky';
260
+ 22 = 'Louisiana';
261
+ 23 = 'Maine';
262
+ 24 = 'Maryland';
263
+ 25 = 'Massachusetts';
264
+ 26 = 'Michigan';
265
+ 27 = 'Minnesota';
266
+ 28 = 'Mississippi';
267
+ 29 = 'Missouri';
268
+ 30 = 'Montana';
269
+ 31 = 'Nebraska';
270
+ 32 = 'Nevada';
271
+ 33 = 'New Hampshire';
272
+ 34 = 'New Jersey';
273
+ 35 = 'New Mexico';
274
+ 36 = 'New York';
275
+ 37 = 'North Carolina';
276
+ 38 = 'North Dakota';
277
+ 39 = 'Ohio';
278
+ 40 = 'Oklahoma';
279
+ 41 = 'Oregon';
280
+ 42 = 'Pennsylvania';
281
+ 44 = 'Rhode Island';
282
+ 45 = 'South Carolina';
283
+ 46 = 'South Dakota';
284
+ 47 = 'Tennessee';
285
+ 48 = 'Texas';
286
+ 49 = 'Utah';
287
+ 50 = 'Vermont';
288
+ 51 = 'Virginia';
289
+ 53 = 'Washington';
290
+ 54 = 'West Virginia';
291
+ 55 = 'Wisconsin';
292
+ 56 = 'Wyoming'")
293
+
294
+ # Define US Census geographic regions.
295
+ northeast <- inputstate %in% c(9, 23, 25, 33, 44, 50, 34, 36, 42)
296
+ midwest <- inputstate %in% c(18, 17, 26, 39, 55, 19, 20, 27, 29, 31, 38, 46)
297
+ south <- inputstate %in% c(10, 11, 12, 13, 24, 37, 45, 51,
298
+ 54, 1, 21, 28, 47, 5, 22, 40, 48)
299
+ west <- inputstate %in% c(4, 8, 16, 35, 30, 49, 32, 56, 2, 6, 15, 41, 53)
300
+
301
+ # Recode employment.
302
+ employ <- recode(employ, "c(9, 98, 99) = NA")
303
+
304
+ # Recode outcome.
305
+ awareness <- recode(awareness, "8 = NA")
306
+
307
+ # Normalize weights.
308
+ weight <- weight / sum(weight)
309
+ })
310
+
311
+ # Rename demographic questions.
312
+ aware <- rename(aware, c("gender" = "female", "birthyr" = "age",
313
+ "faminc_new" = "income", "ideo5" = "ideo"))
314
+
315
+ ## Impute missing values.
316
+ # Specify non-covariate numerical variables (other is exempted since over 10% of
317
+ # responses are missing; state is exempted since the variable is categorical).
318
+ non_covars <- names(aware)[names(aware) %in% c("caseid", "starttime", "endtime",
319
+ "awareness", "state", "weight")]
320
+
321
+ # Use mean imputation for missingness in covariates.
322
+ aware[, -which(names(aware) %in% non_covars)] <-
323
+ data.frame(apply(aware[, -which(names(aware) %in%
324
+ non_covars)], 2, function (x) {
325
+ replace(x, is.na(x), mean(x, na.rm = TRUE))
326
+ }))
327
+
328
+ ### Produce weights for TPNW experimental data using anesrake.
329
+ ## Create unique identifier variable for assigning weights.
330
+ tpnw$caseid <- 1:nrow(tpnw)
331
+
332
+ ## Recode relevant covariates for reweighting: coarsen age; recode female; and
333
+ ## recode geographic covariates.
334
+ # Coarsen age into a categorical variable for age groups.
335
+ tpnw$age_wtng <- cut(tpnw$age, c(0, 25, 35, 45, 55, 65, 99))
336
+ levels(tpnw$age_wtng) <- c("age1824", "age2534", "age3544",
337
+ "age4554", "age5564", "age6599")
338
+
339
+ # Recode female as a factor to account for NA values.
340
+ tpnw$female_wtng <- as.factor(tpnw$female)
341
+ levels(tpnw$female_wtng) <- c("male", "na", "female")
342
+
343
+ # Recode northeast as a factor.
344
+ tpnw$northeast_wtng <- as.factor(tpnw$northeast)
345
+ levels(tpnw$northeast_wtng) <- c("other", "northeast")
346
+
347
+ # Recode midwest as a factor.
348
+ tpnw$midwest_wtng <- as.factor(tpnw$midwest)
349
+ levels(tpnw$midwest_wtng) <- c("other", "midwest")
350
+
351
+ # Recode south as a factor.
352
+ tpnw$south_wtng <- as.factor(tpnw$south)
353
+ levels(tpnw$south_wtng) <- c("other", "south")
354
+
355
+ # Recode west as a factor.
356
+ tpnw$west_wtng <- as.factor(tpnw$west)
357
+ levels(tpnw$west_wtng) <- c("other", "west")
358
+
359
+ ## Specify population targets for balancing (from US Census 2018 data).
360
+ # Specify gender proportion targets and assign names to comport with factors.
361
+ femaletarg <- c(.508, 0, .492)
362
+ names(femaletarg) <- c("female", "na", "male")
363
+
364
+ # Specify age-group proportion targets and assign names to comport with factors.
365
+ agetarg <- c(29363, 44854, 40659, 41537, 41700, 51080)/249193
366
+ names(agetarg) <- c("age1824", "age2534", "age3544",
367
+ "age4554", "age5564", "age6599")
368
+
369
+ # Specify northeast proportion targets and assign names to comport with factors.
370
+ northeasttarg <- c(1 - .173, .173)
371
+ names(northeasttarg) <- c("other", "northeast")
372
+
373
+ # Specify midwest proportion targets and assign names to comport with factors.
374
+ midwesttarg <- c(1 - .209, .209)
375
+ names(midwesttarg) <- c("other", "midwest")
376
+
377
+ # Specify south proportion targets and assign names to comport with factors.
378
+ southtarg <- c(1 - .380, .380)
379
+ names(southtarg) <- c("other", "south")
380
+
381
+ # Specify west proportion targets and assign names to comport with factors.
382
+ westtarg <- c(1 - .238, .238)
383
+ names(westtarg) <- c("other", "west")
384
+
385
+ # Create a list of all targets, with names to comport with relevant variables.
386
+ targets <- list(femaletarg, agetarg, northeasttarg,
387
+ midwesttarg, southtarg, westtarg)
388
+ names(targets) <- c("female_wtng", "age_wtng", "northeast_wtng",
389
+ "midwest_wtng", "south_wtng", "west_wtng")
390
+
391
+ # Produce anesrake weights.
392
+ anesrake_out <- anesrake(targets, tpnw, caseid = tpnw$caseid,
393
+ verbose = TRUE)
394
+
395
+ # Append anesrake weights to TPNW experimental data.
396
+ tpnw$anesrake_weight <- anesrake_out$weightvec
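+ # (Raking diagnostics, such as achieved margins and a weight summary, can be
+ # inspected with summary(anesrake_out).)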
397
+
398
+ # Remove variables used for weighting.
399
+ tpnw <- tpnw[-grep("wtng$", names(tpnw))]
400
+
401
+ ## Write data.
402
+ # Write full experimental dataset.
403
+ write.csv(tpnw, "data/tpnw_data.csv")
404
+
405
+ # Write full YouGov dataset.
406
+ write.csv(aware, "data/tpnw_aware.csv")
1/replication_package/scripts/hbg_group_cue.R ADDED
@@ -0,0 +1,53 @@
1
+ ## Initialize workspace.
2
+ # Remove objects.
3
+ rm(list = ls(all = TRUE))
4
+
5
+ ## Generate data.
6
+ # Create count object storing count data.
7
+ count <- as.matrix(c(1547, 54, 2346))
8
+
9
+ # Convert count object to an object storing percentages.
10
+ perc <- sapply(count, function (x) x/sum(count))
11
+
12
+ # Create a cumulative percentage object.
13
+ cum_perc <- cumsum(perc)
14
+
15
+ # Create separate objects for the plotting of each proportion.
16
+ power_x <- c(0, rep(.74, 2), 0)
17
+ both_x <- c(.74, rep(.96, 2), .74)
18
+ weap_x <- c(.96, rep(1, 2), .96)
19
+
20
+ # Create an object representing the y-axis plotting points for each polygon.
21
+ plot_y <- c(2.25, 2.25, 3, 3)
22
+
23
+ # Open new EPS file.
24
+ setEPS()
25
+ postscript("fgc1.eps", width = 10, height = 3)
26
+
27
+ # Modify graphical parameters (margins).
28
+ par(mar = c(0, 6, 6, 1))
29
+
30
+ # Create an empty plot.
31
+ plot(1, type = "n", xlab = "", ylab = "", xlim = c(0, 1), ylim = c(1.5, 3), axes = FALSE)
32
+
33
+ # Create polygons representing each proportion.
34
+ polygon(power_x, plot_y, col = "#FF8F37", border = "white")
35
+ polygon(both_x, plot_y, col = "steelblue3", border = "white")
36
+ polygon(weap_x, plot_y, col = "gray", border = "white")
37
+
38
+ # Create an axis and tick and axis labels.
39
+ axis(side = 3, at = seq(0, 1, .1), labels = FALSE)
40
+ text(x = seq(0, 1, .2), y = par("usr")[4] + .2, labels = c("0%", "20%", "40%", "60%", "80%", "100%"), xpd = TRUE)
41
+ mtext(text = "Proportion of Responses", side = 3, line = 2.5, cex = 1.25, font = 2)
42
+
43
+ # Add text denoting the percentage associated with each proportion.
44
+ text(x = .74/2, y = 2.2, pos = 1, cex = 2, labels = "74%", col = "#FF8F37", font = 2)
45
+ text(x = .85, y = 2.2, pos = 1, cex = 2, labels = "22%", col = "steelblue3", font = 2)
46
+ text(x = .98, y = 2.2, labels = "4%", pos = 1, cex = 2, col = "grey", font = 2, xpd = TRUE)
47
+
48
+ # Add a legend.
49
+ leg <- legend(x = -.16, y = 2.625, legend = c("Oppose", "Support", "Prefer not\nto answer"), xpd = TRUE,
50
+ pch = 16, col = c("#FF8F37", "steelblue3", "grey"), box.lty = 0, cex = .9, y.intersp = 1.5, yjust = .5)
51
+
52
+ # Close the device.
53
+ dev.off()
1/replication_package/scripts/helper_functions.R ADDED
@@ -0,0 +1,16 @@
1
+ # Define coalesce function for combining partially missing variables (used to
+ # coalesce the income measures in hbg_cleaning.R).
2
+ coalesce <- function (...) {
3
+ Reduce(function(x, y) {
4
+ i <- which(is.na(x))
5
+ x[i] <- y[i]
6
+ x},
7
+ list(...))
8
+ }
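+ # Example: coalesce(c(1, NA, 3), c(9, 2, 9)) returns c(1, 2, 3), filling NA
+ # entries in the first vector from later vectors in order.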
9
+
10
+ # Define capwords() function from the toupper() documentation.
11
+ capwords <- function(s, strict = FALSE) {
12
+ cap <- function(s) paste(toupper(substring(s, 1, 1)),
13
+ {s <- substring(s, 2); if(strict) tolower(s) else s},
14
+ sep = "", collapse = " " )
15
+ sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
16
+ }
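+ # Example: capwords("control mean") returns "Control Mean"; with
+ # strict = TRUE, capwords("TPNW mean", strict = TRUE) returns "Tpnw Mean".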
1/replication_package/scripts/run_hbg_replication.R ADDED
@@ -0,0 +1,36 @@
1
+ ## Initialize workspace.
2
+ # Clear workspace.
3
+ rm(list = ls(all = TRUE))
4
+
5
+ # Set working directory to the hbg_replication directory.
6
+ setwd("~/Downloads/hbg_replication")
7
+
8
+ ## Prepare output directory and main output files.
9
+ # If an output directory does not exist, create the directory.
10
+ if (!file.exists("output")) {
11
+ dir.create("output")
12
+ }
13
+
14
+ # Create a log file for console output.
15
+ hbg_log <- file("output/hbg_log.txt", open = "wt")
16
+
17
+ # Echo and sink console log to the hbg_log file.
18
+ sink(hbg_log, append = TRUE)
19
+ sink(hbg_log, append = TRUE, type = "message")
20
+
21
+ ## Replicate files and produce main output.
22
+ # Run the hbg_cleaning.R and hbg_analysis.R scripts, storing run-time statistics.
23
+ run_time <- system.time({source("scripts/hbg_cleaning.R", echo = TRUE,
24
+ max.deparse.length = 10000)
25
+ source("scripts/hbg_analysis.R", echo = TRUE,
26
+ max.deparse.length = 10000)})
27
+
28
+ # Close main output sink.
29
+ sink()
30
+ sink(type = "message")
31
+
32
+ ## Sink run-time statistics to a run_time output file.
33
+ run_time_file <- file("output/run_time", open = "wt")
34
+ sink(run_time_file, append = TRUE)
35
+ print(run_time)
36
+ sink()
80/replication_package/replication_code.ipynb ADDED
The diff for this file is too large to render.
80/replication_package/usa1.csv ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4989666171cd42ab74a4e1a9a5de80f1428a7f02771718466d36582e43721701
3
+ size 786373887