anonymous-submission-acl2025 committed on
Commit 9a65921 · 0 Parent(s):
1/paper.pdf ADDED
The diff for this file is too large to render. See raw diff
 
1/replication-package/.ipynb_checkpoints/data_cleaning-checkpoint.ipynb ADDED
@@ -0,0 +1,603 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "3b68d946",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "\n",
+ "# Define coalesce function: return the first non-missing value among its arguments\n",
+ "def coalesce(*args):\n",
+ " return pd.Series(args).bfill().iloc[0]\n",
+ "\n",
+ "# Define a function for recoding values, similar to car::recode in R\n",
+ "def recode(series, recode_dict):\n",
+ " return series.replace(recode_dict)\n",
+ "\n",
+ "# Initialize workspace\n",
+ "tpnw = pd.read_csv(\"data/tpnw_raw.csv\", index_col=0)\n",
+ "orig_inc = pd.read_csv(\"data/tpnw_orig_income.csv\", index_col=0)\n",
+ "aware = pd.read_csv(\"data/tpnw_aware_raw.csv\", index_col=0)\n",
+ "\n",
+ "# Clean TPNW data\n",
+ "tpnw = tpnw.iloc[2:, :]\n",
+ "orig_inc = orig_inc.iloc[2:, :]\n",
+ "\n",
+ "tpnw = tpnw[tpnw['consent'] == \"1\"]\n",
+ "orig_inc = orig_inc[orig_inc['consent'] == \"1\"]\n",
+ "\n",
+ "# Coalesce income variables\n",
+ "orig_inc['income'] = pd.to_numeric(orig_inc['income'], errors='coerce')\n",
+ "orig_inc['income'] = np.where(orig_inc['income'] < 1000, np.nan, orig_inc['income'])\n",
+ "orig_inc['income'] = pd.cut(orig_inc['income'], bins=[0, 14999, 24999, 49999, 74999, 99999, 149999, 199999, 249999, 499999, 999999, np.inf], labels=range(1, 12))\n",
+ "orig_inc = orig_inc[['pid', 'income']]\n",
+ "orig_inc.columns = ['pid', 'income_old']\n",
+ "\n",
+ "tpnw = pd.merge(tpnw, orig_inc, on=\"pid\", how=\"left\")\n",
+ "tpnw['income'] = tpnw.apply(lambda row: coalesce(row['income'], row['income_old']), axis=1)\n",
+ "\n",
+ "# Note variables\n",
+ "meta = [\"consent\", \"confirmation_code\", \"new_income_q\"]\n",
+ "qualtrics_vars = [\"StartDate\", \"EndDate\", \"Status\", \"Progress\", \"Duration..in.seconds.\", \"Finished\", \"RecordedDate\", \"DistributionChannel\", \"UserLanguage\"]\n",
+ "dynata_vars = [\"pid\", \"psid\"]\n",
+ "\n",
+ "# Convert character variables to appropriate types and clean data\n",
+ "char_vars = qualtrics_vars + dynata_vars + [\"ResponseId\"] + [col for col in tpnw.columns if \"text\" in col.lower()]\n",
+ "char_cols = tpnw.columns.isin(char_vars)\n",
+ "\n",
+ "# Convert all non-character columns to numeric, coercing errors to NaN\n",
+ "tpnw.loc[:, ~char_cols] = tpnw.loc[:, ~char_cols].apply(pd.to_numeric, errors='coerce')\n",
+ "\n",
+ "tpnw_atts = tpnw.columns.isin([\"danger\", \"peace\", \"safe\", \"use_unaccept\", \"always_cheat\", \"cannot_elim\", \"slow_reduc\"])\n",
+ "tpnw.columns.values[tpnw_atts] = [\"tpnw_atts_\" + col for col in tpnw.columns[tpnw_atts]]\n",
+ "\n",
+ "# Coalesce relevant variables\n",
+ "tpnw['female'] = np.where(tpnw['gender'] == 95, np.nan, tpnw['gender'])\n",
+ "tpnw['age'] = 2019 - tpnw['birthyr']\n",
+ "tpnw['income'] = recode(tpnw['income'], {95: np.nan})\n",
+ "tpnw['pid3'] = np.where(tpnw['pid3'] == 0, tpnw['pid_forc'], tpnw['pid3'])\n",
+ "tpnw['ideo'] = recode(tpnw['ideo'], {3: np.nan})\n",
+ "tpnw['educ'] = recode(tpnw['educ'], {95: np.nan})\n",
+ "state_recode = {\n",
+ " 1: 'Alabama', 2: 'Alaska', 4: 'Arizona', 5: 'Arkansas', 6: 'California', 8: 'Colorado', 9: 'Connecticut', \n",
+ " 10: 'Delaware', 11: 'Washington DC', 12: 'Florida', 13: 'Georgia', 15: 'Hawaii', 16: 'Idaho', 17: 'Illinois',\n",
+ " 18: 'Indiana', 19: 'Iowa', 20: 'Kansas', 21: 'Kentucky', 22: 'Louisiana', 23: 'Maine', 24: 'Maryland',\n",
+ " 25: 'Massachusetts', 26: 'Michigan', 27: 'Minnesota', 28: 'Mississippi', 29: 'Missouri', 30: 'Montana',\n",
+ " 31: 'Nebraska', 32: 'Nevada', 33: 'New Hampshire', 34: 'New Jersey', 35: 'New Mexico', 36: 'New York',\n",
+ " 37: 'North Carolina', 38: 'North Dakota', 39: 'Ohio', 40: 'Oklahoma', 41: 'Oregon', 42: 'Pennsylvania',\n",
+ " 44: 'Rhode Island', 45: 'South Carolina', 46: 'South Dakota', 47: 'Tennessee', 48: 'Texas', 49: 'Utah',\n",
+ " 50: 'Vermont', 51: 'Virginia', 53: 'Washington', 54: 'West Virginia', 55: 'Wisconsin', 56: 'Wyoming'\n",
+ "}\n",
+ "tpnw['state'] = recode(tpnw['state'], state_recode)\n",
+ "\n",
+ "# Create regional indicators\n",
+ "tpnw['northeast'] = tpnw['state'].isin(['Connecticut', 'Maine', 'Massachusetts', 'New Hampshire', 'Rhode Island', 'Vermont', 'New Jersey', 'New York', 'Pennsylvania'])\n",
+ "tpnw['midwest'] = tpnw['state'].isin(['Illinois', 'Indiana', 'Michigan', 'Ohio', 'Wisconsin', 'Iowa', 'Kansas', 'Minnesota', 'Missouri', 'Nebraska', 'North Dakota', 'South Dakota'])\n",
+ "tpnw['south'] = tpnw['state'].isin(['Delaware', 'Florida', 'Georgia', 'Maryland', 'North Carolina', 'South Carolina', 'Virginia', 'Washington DC', 'West Virginia', 'Alabama', 'Kentucky', 'Mississippi', 'Tennessee', 'Arkansas', 'Louisiana', 'Oklahoma', 'Texas'])\n",
+ "tpnw['west'] = tpnw['state'].isin(['Arizona', 'Colorado', 'Idaho', 'Montana', 'Nevada', 'New Mexico', 'Utah', 'Wyoming', 'Alaska', 'California', 'Hawaii', 'Oregon', 'Washington'])\n",
+ "\n",
+ "# Recode join_tpnw outcome and attitudinal outcomes\n",
+ "tpnw['join_tpnw'] = recode(tpnw['join_tpnw'], {2: 0})\n",
+ "tpnw['tpnw_atts_danger'] = recode(tpnw['tpnw_atts_danger'], {-2: 2, -1: 1, 1: -1, 2: -2})\n",
+ "tpnw['tpnw_atts_use_unaccept'] = recode(tpnw['tpnw_atts_use_unaccept'], {-2: 2, -1: 1, 1: -1, 2: -2})\n",
+ "tpnw['tpnw_atts_always_cheat'] = recode(tpnw['tpnw_atts_always_cheat'], {-2: 2, -1: 1, 1: -1, 2: -2})\n",
+ "tpnw['tpnw_atts_cannot_elim'] = recode(tpnw['tpnw_atts_cannot_elim'], {-2: 2, -1: 1, 1: -1, 2: -2})\n",
+ "\n",
+ "# Define indicator variables for each treatment arm\n",
+ "tpnw['control'] = tpnw['treatment'] == 0\n",
+ "tpnw['group_cue'] = tpnw['treatment'] == 1\n",
+ "tpnw['security_cue'] = tpnw['treatment'] == 2\n",
+ "tpnw['norms_cue'] = tpnw['treatment'] == 3\n",
+ "tpnw['institutions_cue'] = tpnw['treatment'] == 4\n",
+ "\n",
+ "# Mean imputation for missing values\n",
+ "numeric_cols = tpnw.columns[~tpnw.columns.isin(char_vars + meta + [\"state\", \"pid_forc\", \"income_old\", \"gender\"])]\n",
+ "tpnw[numeric_cols] = tpnw[numeric_cols].apply(lambda x: x.fillna(x.mean()))\n",
+ "\n",
+ "# Save the cleaned dataset\n",
+ "tpnw.to_csv(\"data/tpnw_data.csv\", index=False)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "3ae7ce11",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ " StartDate EndDate Status Progress \\\n",
+ "0 2019-08-12 16:55:18 2019-08-12 16:55:18 4 100 \n",
+ "1 2019-08-12 16:55:18 2019-08-12 16:55:18 4 100 \n",
+ "2 2019-08-12 16:55:18 2019-08-12 16:55:18 4 100 \n",
+ "3 2019-08-12 16:55:18 2019-08-12 16:55:18 4 100 \n",
+ "4 2019-08-12 16:55:18 2019-08-12 16:55:18 4 100 \n",
+ "... ... ... ... ... \n",
+ "1214 2019-08-12 17:59:25 2019-08-12 18:04:27 0 100 \n",
+ "1215 2019-08-12 17:59:11 2019-08-12 18:04:28 0 100 \n",
+ "1216 2019-08-12 18:00:08 2019-08-12 18:04:58 0 100 \n",
+ "1217 2019-08-12 17:59:11 2019-08-12 18:05:39 0 100 \n",
+ "1218 2019-08-12 17:59:14 2019-08-12 18:15:03 0 100 \n",
+ "\n",
+ " Duration..in.seconds. Finished RecordedDate ResponseId \\\n",
+ "0 503 1 2019-08-12 16:55:20 R_eKatZ6uLJ2ywYpT \n",
+ "1 204 1 2019-08-12 16:55:20 R_7ZI2v7y4DbtW1XD \n",
+ "2 13 1 2019-08-12 16:55:20 R_4UD1j5073pRw8Kx \n",
+ "3 97 1 2019-08-12 16:55:20 R_7UJx1q2BGBgPR0V \n",
+ "4 135 1 2019-08-12 16:55:20 R_6VWAec7rMVtbSWp \n",
+ "... ... ... ... ... \n",
+ "1214 302 1 2019-08-12 18:04:28 R_WoOeItZpT8cYDQd \n",
+ "1215 316 1 2019-08-12 18:04:29 R_2attLt3IeEgUz9s \n",
+ "1216 290 1 2019-08-12 18:04:58 R_2CKBd1hxhlAU3S7 \n",
+ "1217 387 1 2019-08-12 18:05:39 R_1H7g1o2HWZAipRy \n",
+ "1218 949 1 2019-08-12 18:15:03 R_velqedV5Yw40R3j \n",
+ "\n",
+ " DistributionChannel UserLanguage ... age northeast midwest south \\\n",
+ "0 anonymous EN ... 65 False False True \n",
+ "1 anonymous EN ... 68 False False True \n",
+ "2 anonymous EN ... 14 False False False \n",
+ "3 anonymous EN ... 37 False True False \n",
+ "4 anonymous EN ... 41 False True False \n",
+ "... ... ... ... .. ... ... ... \n",
+ "1214 anonymous EN ... 51 False False False \n",
+ "1215 anonymous EN ... 23 False True False \n",
+ "1216 anonymous EN ... 35 True False False \n",
+ "1217 anonymous EN ... 59 True False False \n",
+ "1218 anonymous EN ... 26 False True False \n",
+ "\n",
+ " west control group_cue security_cue norms_cue institutions_cue \n",
+ "0 False False False False True False \n",
+ "1 False True False False False False \n",
+ "2 False False False False False True \n",
+ "3 False False False False True False \n",
+ "4 False False False True False False \n",
+ "... ... ... ... ... ... ... \n",
+ "1214 True False True False False False \n",
+ "1215 False False False True False False \n",
+ "1216 False False False False True False \n",
+ "1217 False False False True False False \n",
+ "1218 False False False False False True \n",
+ "\n",
+ "[1219 rows x 46 columns]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tpnw"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "df57469c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Index(['StartDate', 'EndDate', 'Status', 'Progress', 'Duration..in.seconds.',\n",
+ " 'Finished', 'RecordedDate', 'ResponseId', 'DistributionChannel',\n",
+ " 'UserLanguage', 'consent', 'birthyr', 'gender', 'gender_95_TEXT',\n",
+ " 'state', 'income', 'educ', 'educ_95_TEXT', 'ideo', 'pid3', 'pid_forc',\n",
+ " 'join_tpnw', 'tpnw_atts_danger', 'tpnw_atts_peace', 'tpnw_atts_safe',\n",
+ " 'tpnw_atts_use_unaccept', 'tpnw_atts_always_cheat',\n",
+ " 'tpnw_atts_cannot_elim', 'tpnw_atts_slow_reduc', 'psid', 'pid',\n",
+ " 'new_income_q', 'treatment', 'confirmation_code', 'income_old',\n",
+ " 'female', 'age', 'northeast', 'midwest', 'south', 'west', 'control',\n",
+ " 'group_cue', 'security_cue', 'norms_cue', 'institutions_cue'],\n",
+ " dtype='object')"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tpnw.columns"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "60f8eb35",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "0 5.000000\n",
+ "1 6.000000\n",
+ "2 3.979097\n",
+ "3 5.000000\n",
+ "4 5.000000\n",
+ " ... \n",
+ "1214 4.000000\n",
+ "1215 5.000000\n",
+ "1216 5.000000\n",
+ "1217 3.000000\n",
+ "1218 6.000000\n",
+ "Name: educ, Length: 1219, dtype: float64"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tpnw['educ']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b0a26a47",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "'''\n",
+ "The 'join_tpnw' column contains the outcome: support for joining the TPNW.\n",
+ "The 'group_cue', 'security_cue', 'norms_cue', 'institutions_cue' columns are boolean treatment indicators.\n",
+ "'''"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c5becc59",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# for table1:\n",
+ "# bal_covars <- c(\"age\", \"female\", \"northeast\", \"midwest\", \"west\", \n",
+ "# \t\t\t\t\"south\", \"income\", \"educ\", \"ideo\", \"pid3\")\n",
+ "# for figure1:\n",
+ "# The 'join_tpnw' column contains the outcome: support for joining the TPNW.\n",
+ "# The 'group_cue', 'security_cue', 'norms_cue', 'institutions_cue' columns are boolean treatment indicators."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
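A note on the coalesce helper in the cleaning cell above: pd.Series(args).bfill().iloc[0] returns the first non-missing value among the arguments, because backfilling pulls the first non-NaN entry to position 0. A minimal standalone check (illustrative only, not part of the package):

    import numpy as np
    import pandas as pd

    def coalesce(*args):
        # Backfill propagates the next non-missing value upward,
        # so position 0 holds the first non-missing argument.
        return pd.Series(args).bfill().iloc[0]

    print(coalesce(np.nan, 7, 3))  # 7.0 -- first non-missing argument wins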
1/replication-package/README.txt ADDED
@@ -0,0 +1,152 @@
+ ### README for Herzog, Baron, and Gibbons (Forthcoming), "Anti-Normative
+ ### Messaging, Group Cues, and the Nuclear Ban Treaty"; forthcoming at The
+ ### Journal of Politics.
+
+ ### This README details instructions and files pertaining to survey, data, and
+ ### analysis code for replication purposes. Please direct inquiries to
+
+ ## Meta information and instructions.
+ # Performance assessments:
+ - Measured total run time (seconds), using
+ R CMD BATCH run_hbg_replication.R 2>&1 replication_out.out
+ - 137.820 (see run_time outfile for machine-specific statistics).
+ - Hardware used.
+ - Lenovo ThinkPad X1 Carbon 5th Generation;
+ - Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz;
+ - Physical Memory Array; Maximum Capacity 16GB.
+ - Operating system used.
+ - Linux Mint 19 Tara Cinnamon 64-bit (4.10.0-38-generic).
+
+ # Dependencies:
+ 1.) R version 3.6.3 (2020-02-29) -- "Holding the Windsock."
+ - Required packages.
+ - plyr
+ - car
+ - anesrake
+ - sandwich
+ 2.) LaTeX (for typesetting tabular output).
+ - Required and recommended packages.
+ - array
+ - booktabs
+ - float
+ - makecell
+ - multirow
+ - siunitx
+
+ # Instructions:
+ 1.) Set working directory to the replication-file parent directory.
+ - All scripts assume ~/Downloads/hbg_replication as the parent directory.
+ 2.) For all operating systems, the scripts/run_hbg_replication.R script may be
+ executed in an R instance.
+ - Open the run_hbg_replication.R script in R.
+ - Run all commands in the console.
+ 3.) For UNIX/UNIX-like systems (macOS, Linux, Windows 10 Subsystem for Linux),
+ it is recommended to run the script in a terminal instance.
+ - Enter either
+ R CMD BATCH scripts/run_hbg_replication.R 2>&1 cli_script.out
+ which will produce an outfile containing command-line interface output in
+ the cli_script.out outfile; or,
+ Rscript scripts/run_hbg_replication.R
+ though Rscript will not echo output.
+ 4.) Commands may also be run in an interactive R session without use of
+ run_hbg_replication.R, e.g., in RStudio (a scripted equivalent of these
+ steps is sketched below).
+ - Working directory will have to be set manually; in the R console, enter
+ setwd("~/Downloads/hbg_replication")
+ - The output directory will also need to be created separately; once the
+ working directory has been set to the parent directory, in the R console,
+ enter
+ dir.create("output")
+
+ ## Directories and files.
62
+ # ./meta:
63
+ 1.) hbg_instrument.pdf
64
+ - Herzog, Baron, and Gibbons (Forthcoming) survey instrument.
65
+ - The instrument does not describe randomization; treatment assignment was
66
+ randomized using Qualtrics embedded data, randomized using Qualtrics'
67
+ internal "Evenly Present Elements" algorithm. Some answer choice options
68
+ were also randomized in order to avoid ordering effects; questions employing
69
+ internal randomization include pid3, join_tpnw, and the row order of the
70
+ attitudinal outcomes battery.
71
+ 2.) hbg_codebook.txt
72
+ - Herzog, Baron, and Gibbons codebook.
73
+ - Details coding values for embedded data and survey questions in
74
+ all included data files.
75
+ - Notes variable recoding values used in cleaned experimental data
76
+ (tpnw_data.csv), used for analysis.
77
+ 3.) hbg_pap.pdf
78
+ - Herzog, Baron, and Gibbons pre-analysis plan.
79
+ - Details all analysis decisions, per research design pre-registered
80
+ with EGAP prior to collecting experimental data.
81
+
82
+ # ./data:
83
+ 1.) tpnw_aware_raw.csv
84
+ - Data from Herzog, Baron, and Gibbons YouGov study.
85
+ - Note that some variables have been excluded as they are used in separate
86
+ studies.
87
+ 2.) tpnw_orig_income.csv
88
+ - Data from original income coding from Herzog, Baron, and Gibbons.
89
+ - Note that some variables have been excluded as they are used in separate
90
+ studies.
91
+ 3.) tpnw_raw.csv
92
+ - Data from Herzog, Baron, and Gibbons experimental survey.
93
+ - Note that some variables have been excluded as they are used in separate
94
+ studies.
95
+
96
+ # ./output (produced by either ../scripts/hbg.sh or
97
+ # ../scripts/run_hbg_replication.R):
98
+ 1.) ./hbg_log.txt
99
+ - Output for experimental data cleaning and analysis (produced by either
100
+ ../scripts/hbg.sh or ../scripts/run_hbg_replication.R).
101
+ 2.) ./run_time
102
+ - Output for total run time (produced by either ../scripts/hbg.sh or
103
+ ../scripts/run_hbg_replication.R).
104
+ 4.) ./fg%.eps
105
+ - .eps images of figures produced by ../scripts/hbg_replication.R);
106
+ inventoried below.
107
+ 5.) ./%_tab.tex
108
+ - .tex files containing LaTeX tables produced by
109
+ ../scripts/hbg_replication.R; inventoried below.
110
+
111
+ # ./scripts:
112
+ 1.) run_hbg_replication.R
113
+ - "Run file" to run replication code and produce console and run-time output
114
+ in R (all systems); produces
115
+ - ../output/hbg_log.txt
116
+ - Output for all analyses and results.
117
+ - ../output/run_time
118
+ - Output for total run time.
119
+ 2.) helper_functions.R
120
+ - R source file containing replication code helper functions.
121
+ 3.) hbg_cleaning.R
122
+ - Cleaning script; outputs cleaned experimental dataset including anesrake
123
+ weights.
124
+ - ../data/tpnw_data.csv
125
+ - Cleaned experimental data.
126
+ - ../data/tpnw_aware.csv
127
+ - Cleaned YouGov data.
128
+ 4.) hbg_analysis.R
129
+ - Analysis script; outputs analysis results in graphical, tabular, and RData
130
+ formats.
131
+ - ../output/fg1.eps
132
+ - .eps image of Figure 1.
133
+ - ../output/%_tab.tex
134
+ - .tex files containing LaTeX markup of all tables.
135
+ - balance_tab.tex
136
+ - Table demonstrating covariate balance across arms.
137
+ - main_results_tab.tex
138
+ - Table containing main results.
139
+ - atts_tab.tex
140
+ - Table containing attitudinal battery results.
141
+ - pid_support_tab.tex
142
+ - Table containing results by partisan identification.
143
+ - ideo_support_tab.tex
144
+ - Table containing results by political ideology.
145
+ - weighted_main_results_tab.tex
146
+ - Table containing weighted main results.
147
+ - ../output/hbg_replication_out.RData
148
+ - .RData file containing all analysis results.
149
+ 5.) hbg_group_cue.R
150
+ - Script to produce group cue graphic.
151
+ - hbg_fgc1.eps
152
+ - .eps image of Figure C1.
1/replication-package/data/tpnw_aware_raw.csv ADDED
The diff for this file is too large to render. See raw diff
 
1/replication-package/data/tpnw_orig_income.csv ADDED
@@ -0,0 +1,159 @@
+ "","consent","income","pid"
+ "1","Online Consent to Participate in a Research Study Purpose:We are conducting a research study to examine attitudes relevant to American nuclear policy. Procedures:Participation in this study will involve completing a short survey which will take you approximately 5 minutes. Risks and Benefits:It is unlikely, but possible, that participants in this study may experience distress over the nature of the questions. Although this study will not benefit you personally, we hope that our results will add to the knowledge about public preferences on this topic. Confidentiality:All of your responses will be anonymous. When we publish any results from this study, we will do so in a way that does not identify you. We may also share the data with other researchers so that they can check the accuracy of our conclusions but will only do so if we are confident that your anonymity is protected. Voluntary Participation:Participation in this study is completely voluntary. You are free to decline to participate, to end participation at any time for any reason, or to refuse to answer any individual question. Refusing to participate will involve no penalty or loss of benefits or compensation to which you are otherwise entitled. Questions:If you have any questions about this study, you may contact the investigator, Jonathon Baron at [email protected]. If you would like to talk with someone other than the researcher to discuss problems or concerns, to discuss situations in the event that a member of the research team is not available, or to discuss your rights as a research participant, you may contact the Yale University Human Subjects Committee, (203) 785-4688, [email protected]. Additional information is available at http://www.yale.edu/hrpp/participants/index.html. The IRB Protocol Number is HIC/HSC #2000026191. Additional information is available at http://www.yale.edu/hrpp/participants/index.html.
+ Do you voluntarily consent to participate in this study?","What was your total income in 2018, before taxes?
+ This figure should include income from all sources, including salaries, wages, pensions, Social Security, dividends, interest, and all other income.
+ Type the number. Your best guess is fine.","pid"
+ "2","{""ImportId"":""QID3""}","{""ImportId"":""QID10_TEXT""}","{""ImportId"":""pid""}"
+ "3","2","","1502062540"
+ "4","2","","1625783733"
+ "5","1","","1504516479"
+ "6","2","","1562320221"
+ "7","2","","1504394326"
+ "8","2","","1502966236"
+ "9","2","","1507996899"
+ "10","1","80000","1612594169"
+ "11","1","85000","1507144288"
+ "12","1","0","1502947386"
+ "13","1","150000","1503517729"
+ "14","1","55000","1503260415"
+ "15","1","12009","1517622277"
+ "16","2","","1502135834"
+ "17","1","75000","1504815230"
+ "18","2","","1503933642"
+ "19","1","25000","1512900692"
+ "20","1","70000","1503526516"
+ "21","1","50000","1458332659"
+ "22","1","300000","1503519684"
+ "23","1","45000","1502102790"
+ "24","1","35000","1507984745"
+ "25","1","185000","1502941026"
+ "26","1","180000","1503395421"
+ "27","1","124000","1501922011"
+ "28","1","108000","1589002432"
+ "29","1","112000","1502114324"
+ "30","1","95000","1505144365"
+ "31","1","1200","1609910274"
+ "32","1","69500","1502424215"
+ "33","1","40000","1501951157"
+ "34","1","10000","1507851231"
+ "35","1","90000","1501382953"
+ "36","1","50000","1501203924"
+ "37","1","69000","1502433491"
+ "38","1","4","1620578427"
+ "39","1","21000","1559531870"
+ "40","1","100.000","1503213021"
+ "41","1","80000","1502350937"
+ "42","1","21000","1506953783"
+ "43","1","85000","1503869686"
+ "44","1","49000","1508075378"
+ "45","1","87000","1502054654"
+ "46","1","500000","1624874452"
+ "47","1","125000","1503901280"
+ "48","1","55555","1503682666"
+ "49","1","52000","1502312418"
+ "50","2","","1503552191"
+ "51","1","55000","1502928920"
+ "52","2","","1503699037"
+ "53","1","93000","1510340238"
+ "54","1","144000","1500599037"
+ "55","1","70000","1515055425"
+ "56","1","0","1617601247"
+ "57","1","50000","1502555585"
+ "58","1","205000","1501690743"
+ "59","1","34000","1592529445"
+ "60","1","54000","1392620222"
+ "61","2","","1554962980"
+ "62","1","140000","1504441405"
+ "63","1","160000","1502946209"
+ "64","1","51000","1506255770"
+ "65","1","11000","1503786811"
+ "66","1","118000","1577043829"
+ "67","1","90000","1505682884"
+ "68","1","93000","1503495386"
+ "69","1","15000","1625921448"
+ "70","1","150000","1503656134"
+ "71","1","32000","1504880858"
+ "72","1","13000","1502121511"
+ "73","1","24000","1506997301"
+ "74","1","125","1610852417"
+ "75","1","60000","1504815464"
+ "76","1","65000","1503559519"
+ "77","1","15000","1605730719"
+ "78","1","190000","1502167950"
+ "79","1","120000","1504679237"
+ "80","1","0","1505219251"
+ "81","1","153000","1503666280"
+ "82","1","0","1550550267"
+ "83","1","30000","1592892246"
+ "84","1","125000","1502960871"
+ "85","1","39000","1602114211"
+ "86","1","119000","1505219213"
+ "87","1","195000","1500931136"
+ "88","1","94000","1505081884"
+ "89","1","100000","1504446271"
+ "90","1","8000","1509011251"
+ "91","1","50000","1502052961"
+ "92","1","19000","1502783024"
+ "93","1","200000","1501213890"
+ "94","1","3","1535612551"
+ "95","1","40000","1576959406"
+ "96","1","5000","1605961384"
+ "97","1","7","1610190271"
+ "98","1","100000","1503271348"
+ "99","1","85000","1505069365"
+ "100","1","4","1503009287"
+ "101","1","68500","1502770545"
+ "102","1","67000","1502123938"
+ "103","1","1500","1613576483"
+ "104","1","165479","1625257944"
+ "105","1","50000","1610773895"
+ "106","1","169000","1502770041"
+ "107","1","80000","1624729504"
+ "108","1","68000","1501342334"
+ "109","1","40000","1503051503"
+ "110","1","69888","1502067787"
+ "111","1","38000","1517528713"
+ "112","1","25435","1507972741"
+ "113","1","32000","1589493055"
+ "114","1","30000","1507645439"
+ "115","1","13000","1574624272"
+ "116","1","205000","1503862329"
+ "117","1","168000","1538677505"
+ "118","1","45000","1611277628"
+ "119","1","49000","1599859855"
+ "120","1","60000","1504820572"
+ "121","1","50000","1572910868"
+ "122","1","20000","1501025523"
+ "123","1","100000","1502462916"
+ "124","1","33000","1585194423"
+ "125","1","100","1509014810"
+ "126","1","100000","1614836888"
+ "127","1","39000","1503162728"
+ "128","1","145000","1502365890"
+ "129","1","75000","1609232949"
+ "130","1","110000","1589025244"
+ "131","1","70000","1625686657"
+ "132","1","20000","1504305795"
+ "133","1","49000","1535452703"
+ "134","1","110000","1505220197"
+ "135","1","25000","1625895630"
+ "136","1","55000.00","1504658766"
+ "137","1","35000","1513866152"
+ "138","1","41000","1528133448"
+ "139","1","60000","1505654351"
+ "140","1","35000","1507978362"
+ "141","1","49000","1507542646"
+ "142","1","145000","1504407288"
+ "143","1","42000","1562262080"
+ "144","1","49000","1501608449"
+ "145","1","120000","1505187502"
+ "146","1","26000","1507910667"
+ "147","1","51000","1610917522"
+ "148","1","65000","1507283410"
+ "149","1","60000","1625693758"
+ "150","1","16900","1625862677"
+ "151","1","30000","1507983784"
+ "152","1","65000","1598028187"
+ "153","1","19000","1508930000"
+ "154","1","110000","1500889450"
+ "155","1","32000","1504829248"
1/replication-package/data/tpnw_raw.csv ADDED
The diff for this file is too large to render. See raw diff
 
1/replication-package/meta/hbg_codebook.txt ADDED
@@ -0,0 +1,1009 @@
+ ================================================================================
+ RAW EXPERIMENTAL DATA (tpnw_raw.csv)
+ ================================================================================
+
+ --------------------------------------------------------------------------------
+ StartDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date and time that respondent started the survey
+
+ --------------------------------------------------------------------------------
+ EndDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date and time that respondent finished the survey
+
+ --------------------------------------------------------------------------------
+ Status
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicator of the type of response collected.
+
+ 0 - IP Address: A normal response
+ 1 - Survey Preview: A preview response
+ 2 - Survey Test: A test response
+ 4 - Imported: An imported response
+ 8 - Spam: A possible spam response
+ 9 - Preview Spam: A possible spam response submitted through the preview link
+ 12 - Imported Spam: A possible spam response that was imported
+ 16 - Offline: A Qualtrics Offline App response
+ 17 - Offline Preview: Previews submitted through the Qualtrics Offline App. This
+ feature is deprecated in the latest versions of the app
+
+ --------------------------------------------------------------------------------
+ Progress
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicates the progress a respondent made before finishing the survey.
+
+ 100 - Respondent completed the survey or was screened out
+
+ --------------------------------------------------------------------------------
+ Duration..in.seconds.
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Number of seconds it took a respondent to complete the survey.
+
+ --------------------------------------------------------------------------------
+ Finished
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Indicates whether a respondent finished the survey.
+
+ 1 - Respondent finished the survey or was screened out
+
+ --------------------------------------------------------------------------------
+ RecordedDate
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Date that respondent's survey was recorded in Qualtrics.
+
+ --------------------------------------------------------------------------------
+ ResponseId
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating unique respondent ID.
+
+ --------------------------------------------------------------------------------
+ DistributionChannel
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating method of survey distribution.
+
+ "anonymous" - Survey was distributed without collecting respondent data
+
+ --------------------------------------------------------------------------------
+ UserLanguage
+ --------------------------------------------------------------------------------
+
+ Qualtrics embedded data field (more information is available on Qualtrics'
+ website at https://www.qualtrics.com/support/survey-platform/
+ data-and-analysis-module/data/download-data/understanding-your-dataset/)
+
+ Character variable indicating respondent's language code.
+
+ "EN" - Respondent took the survey in English
+
+ --------------------------------------------------------------------------------
+ psid
+ --------------------------------------------------------------------------------
+
+ Dynata embedded data field
+
+ Character variable uniquely identifying a respondent and specific project
+ (project specific ID).
+
+ --------------------------------------------------------------------------------
+ pid
+ --------------------------------------------------------------------------------
+
+ Dynata embedded data field
+
+ Numeric variable uniquely identifying a panelist (panelist ID).
+
+ --------------------------------------------------------------------------------
+ consent
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Indicator of whether a respondent consented to participate in the survey.
+
+ 0 - Respondent did not consent
+ 1 - Respondent consented
+
+ --------------------------------------------------------------------------------
+ new_income_q
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Indicator of usage of new income demographic question format.
+
+ "" - Old income question
+ 1 - New income question
+
+ --------------------------------------------------------------------------------
+ confirmation_code
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Numeric code provided to each respondent for response recording after completing
+ the survey.
+
+ --------------------------------------------------------------------------------
+ treatment
+ --------------------------------------------------------------------------------
+
+ Custom embedded data field
+
+ Numeric variable indicating treatment arm to which a respondent was assigned
+
+ 0 - Control
+ 1 - Group Cue
+ 2 - Security Cue
+ 3 - Norms Cue
+ 4 - Institutions Cue
+
+ --------------------------------------------------------------------------------
+ birthyr
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's birth year (numeric entry only).
+
+ --------------------------------------------------------------------------------
+ gender
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported gender.
+
+ 0 - Male
+ 1 - Female
+ 95 - Other
+
+ --------------------------------------------------------------------------------
+ gender_95_TEXT
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported gender (if Other; text entry).
+
+ --------------------------------------------------------------------------------
+ state
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's state of residence (recoded to character strings in cleaned
+ experimental data).
+
+ 1 - Alabama
+ 2 - Alaska
+ 4 - Arizona
+ 5 - Arkansas
+ 6 - California
+ 8 - Colorado
+ 9 - Connecticut
+ 10 - Delaware
+ 11 - District of Columbia
+ 12 - Florida
+ 13 - Georgia
+ 15 - Hawaii
+ 16 - Idaho
+ 17 - Illinois
+ 18 - Indiana
+ 19 - Iowa
+ 20 - Kansas
+ 21 - Kentucky
+ 22 - Louisiana
+ 23 - Maine
+ 24 - Maryland
+ 25 - Massachusetts
+ 26 - Michigan
+ 27 - Minnesota
+ 28 - Mississippi
+ 29 - Missouri
+ 30 - Montana
+ 31 - Nebraska
+ 32 - Nevada
+ 33 - New Hampshire
+ 34 - New Jersey
+ 35 - New Mexico
+ 36 - New York
+ 37 - North Carolina
+ 38 - North Dakota
+ 39 - Ohio
+ 40 - Oklahoma
+ 41 - Oregon
+ 42 - Pennsylvania
+ 44 - Rhode Island
+ 45 - South Carolina
+ 46 - South Dakota
+ 47 - Tennessee
+ 48 - Texas
+ 49 - Utah
+ 50 - Vermont
+ 51 - Virginia
+ 53 - Washington
+ 54 - West Virginia
+ 55 - Wisconsin
+ 56 - Wyoming
+
+ --------------------------------------------------------------------------------
+ income
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported, pre-tax family income.
+
+ 1 - Less than $15,000
+ 2 - $15,000 to $24,999
+ 3 - $25,000 to $49,999
+ 4 - $50,000 to $74,999
+ 5 - $75,000 to $99,999
+ 6 - $100,000 to $149,999
+ 7 - $150,000 to $199,999
+ 8 - $200,000 to $249,999
+ 9 - $250,000 to $499,999
+ 10 - $500,000 to $999,999
+ 11 - More than $1 million
+ 95 - Prefer not to say (recoded to NA in cleaned experimental data)
+
+ Note: income is coalesced with income from tpnw_orig_income.csv, described
+ below.
+
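As a concrete example of the bracket coding, the dollar amounts in tpnw_orig_income.csv are binned with the same cut points used in the package's Python cleaning notebook; a minimal pandas sketch:

    import numpy as np
    import pandas as pd

    # Upper edges of brackets 1-11; pd.cut includes the right edge.
    bins = [0, 14999, 24999, 49999, 74999, 99999,
            149999, 199999, 249999, 499999, 999999, np.inf]
    dollars = pd.Series([12000, 55000, 300000])
    print(pd.cut(dollars, bins=bins, labels=range(1, 12)).tolist())
    # [1, 4, 9] -- e.g., $55,000 falls in bracket 4 ($50,000 to $74,999)

In the cleaning notebook, dollar values below $1,000 are treated as missing before binning, and the resulting brackets are coalesced with the categorical income variable described above.
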
+ --------------------------------------------------------------------------------
+ educ
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported level of education.
+
+ 1 - Did not graduate from high school
+ 2 - High school graduate or equivalent (for example: GED)
+ 3 - Some college, but no degree (yet)
+ 4 - 2-year college degree
+ 5 - 4-year college degree
+ 6 - Postgraduate degree (MA, MBA, MD, JD, PhD, EdD, etc.)
+ 95 - Other (recoded to NA in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ educ_95_TEXT
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported education level (if Other; text entry).
+
+ --------------------------------------------------------------------------------
+ ideo
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Respondent's self-reported, five-point political ideology.
+
+ -2 - Very liberal
+ -1 - Liberal
+ 0 - Moderate; middle of the road
+ 1 - Conservative
+ 2 - Very conservative
+ 3 - Haven't thought much about this (recoded to NA in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ pid3
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Three-point partisan identification.
+
+ -1 - Democrat
+ 0 - Independent
+ 1 - Republican
+
+ --------------------------------------------------------------------------------
+ pid_forc
+ --------------------------------------------------------------------------------
+
+ Demographic question
+
+ Follow-up question to pid3; displayed only if pid3 skipped or if respondent
+ replied "Independent" to pid3 (coalesced with pid3 in cleaned experimental
+ data).
+
+ -1 - Closer to Democratic
+ 0 - Neither
+ 1 - Closer to Republican
+
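The coalescing works as in the cleaning notebook: respondents who answered "Independent" (0) to pid3 take their pid_forc lean. A minimal sketch with illustrative values:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"pid3": [-1, 0, 0], "pid_forc": [np.nan, 1, 0]})
    # Replace Independents' pid3 with their forced-choice lean.
    df["pid3"] = np.where(df["pid3"] == 0, df["pid_forc"], df["pid3"])
    print(df["pid3"].tolist())  # [-1.0, 1.0, 0.0]
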
+ --------------------------------------------------------------------------------
+ join_tpnw
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Respondent's support for joining TPNW.
+
+ 1 - Yes
+ 2 - No (recoded to 0 in cleaned experimental data)
+
+ --------------------------------------------------------------------------------
+ tpnw_atts_danger
+ --------------------------------------------------------------------------------
+
+ Outcome question
+
+ Nuclear weapons are dangerous and present a threat to the world (reverse-coded
+ in cleaned experimental data).
+
+ 2 - Strongly Agree
+ 1 - Agree
+ -1 - Disagree
+ -2 - Strongly disagree
+
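Reverse-coding here (and for the other reverse-coded items below) flips the sign of the agreement scale, matching the mapping used in the cleaning notebook; a minimal sketch:

    import pandas as pd

    # Strongly agree (2) becomes strongly disagree (-2), and so on.
    s = pd.Series([2, 1, -1, -2])
    print(s.replace({-2: 2, -1: 1, 1: -1, 2: -2}).tolist())  # [-2, -1, 1, 2]
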
391
+ --------------------------------------------------------------------------------
392
+ tpnw_atts_peace
393
+ --------------------------------------------------------------------------------
394
+
395
+ Outcome question
396
+
397
+ Nuclear weapons contribute to peace by preventing conflict between countries.
398
+
399
+ 2 - Strongly Agree
400
+ 1 - Agree
401
+ -1 - Disagree
402
+ -2 - Strongly disagree
403
+
404
+ --------------------------------------------------------------------------------
405
+ tpnw_atts_safe
406
+ --------------------------------------------------------------------------------
407
+
408
+ Outcome question
409
+
410
+ Nuclear weapons help to keep my country safe.
411
+
412
+ 2 - Strongly Agree
413
+ 1 - Agree
414
+ -1 - Disagree
415
+ -2 - Strongly disagree
416
+
417
+ --------------------------------------------------------------------------------
418
+ tpnw_atts_use_unaccept
419
+ --------------------------------------------------------------------------------
420
+
421
+ Outcome question
422
+
423
+ It is unacceptable to use nuclear weapons in any situation (reverse-coded
424
+ in cleaned experimental data).
425
+
426
+ 2 - Strongly Agree
427
+ 1 - Agree
428
+ -1 - Disagree
429
+ -2 - Strongly disagree
430
+
431
+ --------------------------------------------------------------------------------
432
+ tpnw_atts_always_cheat
433
+ --------------------------------------------------------------------------------
434
+
435
+ Outcome question
436
+
437
+ Some countries will always cheat and disobey nuclear treaties (reverse-coded
438
+ in cleaned experimental data).
439
+
440
+ 2 - Strongly Agree
441
+ 1 - Agree
442
+ -1 - Disagree
443
+ -2 - Strongly disagree
444
+
445
+ --------------------------------------------------------------------------------
446
+ tpnw_atts_cannot_elim
447
+ --------------------------------------------------------------------------------
448
+
449
+ Outcome question
450
+
451
+ Now that nuclear weapons exist, they can never be eliminated (reverse-coded
452
+ in cleaned experimental data).
453
+
454
+ 2 - Strongly Agree
455
+ 1 - Agree
456
+ -1 - Disagree
457
+ -2 - Strongly disagree
458
+
459
+ --------------------------------------------------------------------------------
460
+ tpnw_atts_slow_reduc
461
+ --------------------------------------------------------------------------------
462
+
463
+ Outcome question
464
+
465
+ Reducing the number of nuclear weapons over time is safer than immediate nuclear
466
+ disarmament.
467
+
468
+ 2 - Strongly agree
469
+ 1 - Agree
470
+ -1 - Disagree
471
+ -2 - Strongly disagree
472
+
473
+ ================================================================================
474
+ YOUGOV DATA (tpnw_aware.csv)
475
+ ================================================================================
476
+
477
+ --------------------------------------------------------------------------------
478
+ caseid
479
+ --------------------------------------------------------------------------------
480
+
481
+ YouGov embedded data field
482
+
483
+ Numeric variable indicating case ID.
484
+
485
+ --------------------------------------------------------------------------------
486
+ starttime
487
+ --------------------------------------------------------------------------------
488
+
489
+ YouGov embedded data field
490
+
491
+ Date on which the respondent started the survey.
492
+
493
+ --------------------------------------------------------------------------------
494
+ endtime
495
+ --------------------------------------------------------------------------------
496
+
497
+ YouGov embedded data field
498
+
499
+ Date on which the respondent finished the survey.
500
+
501
+ --------------------------------------------------------------------------------
502
+ weight
503
+ --------------------------------------------------------------------------------
504
+
505
+ YouGov weighting variable
506
+
507
+ Numeric variable containing post-stratification weights.
508
+
509
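+ For example, weighted response shares (here, for the awareness item described
+ below) are computed in hbg_analysis.R as:
+
+   with(aware, weighted.mean(awareness == 1, w = weight, na.rm = TRUE))
+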
+ --------------------------------------------------------------------------------
510
+ birthyr
511
+ --------------------------------------------------------------------------------
512
+
513
+ YouGov demographic question
514
+
515
+ Respondent's birth year (numeric).
516
+
517
+ --------------------------------------------------------------------------------
518
+ gender
519
+ --------------------------------------------------------------------------------
520
+
521
+ YouGov demographic question
522
+
523
+ Respondent's self-reported gender.
524
+
525
+ 1 - Male
526
+ 2 - Female
527
+
528
+ --------------------------------------------------------------------------------
529
+ race
530
+ --------------------------------------------------------------------------------
531
+
532
+ YouGov demographic question
533
+
534
+ Respondent's self-reported race.
535
+
536
+ 1 - White
537
+ 2 - Black
538
+ 3 - Hispanic
539
+ 4 - Asian
540
+ 5 - Native American
541
+ 6 - Mixed
542
+ 7 - Other
543
+ 8 - Middle Eastern
544
+
545
+ --------------------------------------------------------------------------------
546
+ educ
547
+ --------------------------------------------------------------------------------
548
+
549
+ YouGov demographic question
550
+
551
+ Respondent's self-reported education level.
552
+
553
+ 1 - No high school
554
+ 2 - High school graduate
555
+ 3 - Some college
556
+ 4 - 2-year college degree
557
+ 5 - 4-year college degree
558
+ 6 - Post-graduate degree
559
+
560
+ --------------------------------------------------------------------------------
561
+ marstat
562
+ --------------------------------------------------------------------------------
563
+
564
+ YouGov demographic question
565
+
566
+ Respondent's self-reported marital status.
567
+
568
+ 1 - Married
569
+ 2 - Separated
570
+ 3 - Divorced
571
+ 4 - Widowed
572
+ 5 - Never married
573
+ 6 - Domestic / civil partnership
574
+
575
+ --------------------------------------------------------------------------------
576
+ employ
577
+ --------------------------------------------------------------------------------
578
+
579
+ YouGov demographic question
580
+
581
+ Respondent's self-reported employment status.
582
+
583
+ 1 - Full-time
584
+ 2 - Part-time
585
+ 3 - Temporarily laid off
586
+ 4 - Unemployed
587
+ 5 - Retired
588
+ 6 - Permanently disabled
589
+ 7 - Homemaker
590
+ 8 - Student
591
+ 9 - Other
592
+
593
+ --------------------------------------------------------------------------------
594
+ faminc_new
595
+ --------------------------------------------------------------------------------
596
+
597
+ YouGov demographic question
598
+
599
+ Respondent's self-reported family income.
600
+
601
+ 1 - Less than $10,000
602
+ 2 - $10,000 - $19,999
603
+ 3 - $20,000 - $29,999
604
+ 4 - $30,000 - $39,999
605
+ 5 - $40,000 - $49,999
606
+ 6 - $50,000 - $59,999
607
+ 7 - $60,000 - $69,999
608
+ 8 - $70,000 - $79,999
609
+ 9 - $80,000 - $99,999
610
+ 10 - $100,000 - $119,999
611
+ 11 - $120,000 - $149,999
612
+ 12 - $150,000 - $199,999
613
+ 13 - $200,000 - $249,999
614
+ 14 - $250,000 - $349,999
615
+ 15 - $350,000 - $499,999
616
+ 16 - $500,000 or more
617
+ 97 - Prefer not to say
618
+
619
+ --------------------------------------------------------------------------------
620
+ pid3
621
+ --------------------------------------------------------------------------------
622
+
623
+ YouGov demographic question
624
+
625
+ Respondent's self-reported three-point partisan identification.
626
+
627
+ 1 - Democrat
628
+ 2 - Republican
629
+ 3 - Independent
630
+ 4 - Other
631
+ 5 - Not sure
632
+
633
+ --------------------------------------------------------------------------------
634
+ pid7
635
+ --------------------------------------------------------------------------------
636
+
637
+ YouGov demographic question
638
+
639
+ Respondent's self-reported seven-point partisan identification.
640
+
641
+ 1 - Strong Democrat
642
+ 2 - Not very strong Democrat
643
+ 3 - Lean Democrat
644
+ 4 - Independent
645
+ 5 - Lean Republican
646
+ 6 - Not very strong Republican
647
+ 7 - Strong Republican
648
+ 8 - Not sure
649
+ 9 - Don't know
650
+
651
+ --------------------------------------------------------------------------------
652
+ presvote2016post
653
+ --------------------------------------------------------------------------------
654
+
655
+ YouGov demographic question
656
+
657
+ Respondent's self-reported 2016 Presidential Election vote choice.
658
+
659
+ 1 - Hillary Clinton
660
+ 2 - Donald Trump
661
+ 3 - Gary Johnson
662
+ 4 - Jill Stein
663
+ 5 - Evan McMullin
664
+ 6 - Other
665
+ 7 - Did not vote for President
666
+
667
+ --------------------------------------------------------------------------------
668
+ inputstate
669
+ --------------------------------------------------------------------------------
670
+
671
+ YouGov demographic question
672
+
673
+ Respondent's state of residence.
674
+
675
+ 1 - Alabama
676
+ 2 - Alaska
677
+ 4 - Arizona
678
+ 5 - Arkansas
679
+ 6 - California
680
+ 8 - Colorado
681
+ 9 - Connecticut
682
+ 10 - Delaware
683
+ 11 - District of Columbia
684
+ 12 - Florida
685
+ 13 - Georgia
686
+ 15 - Hawaii
687
+ 16 - Idaho
688
+ 17 - Illinois
689
+ 18 - Indiana
690
+ 19 - Iowa
691
+ 20 - Kansas
692
+ 21 - Kentucky
693
+ 22 - Louisiana
694
+ 23 - Maine
695
+ 24 - Maryland
696
+ 25 - Massachusetts
697
+ 26 - Michigan
698
+ 27 - Minnesota
699
+ 28 - Mississippi
700
+ 29 - Missouri
701
+ 30 - Montana
702
+ 31 - Nebraska
703
+ 32 - Nevada
704
+ 33 - New Hampshire
705
+ 34 - New Jersey
706
+ 35 - New Mexico
707
+ 36 - New York
708
+ 37 - North Carolina
709
+ 38 - North Dakota
710
+ 39 - Ohio
711
+ 40 - Oklahoma
712
+ 41 - Oregon
713
+ 42 - Pennsylvania
714
+ 44 - Rhode Island
715
+ 45 - South Carolina
716
+ 46 - South Dakota
717
+ 47 - Tennessee
718
+ 48 - Texas
719
+ 49 - Utah
720
+ 50 - Vermont
721
+ 51 - Virginia
722
+ 53 - Washington
723
+ 54 - West Virginia
724
+ 55 - Wisconsin
725
+ 56 - Wyoming
726
+
727
+ --------------------------------------------------------------------------------
728
+ votereg
729
+ --------------------------------------------------------------------------------
730
+
731
+ YouGov demographic question
732
+
733
+ Respondent's self-reported voter registration status.
734
+
735
+ 1 - Yes
736
+ 2 - No
737
+ 3 - Don't know
738
+
739
+ --------------------------------------------------------------------------------
740
+ ideo5
741
+ --------------------------------------------------------------------------------
742
+
743
+ YouGov demographic question
744
+
745
+ Respondent's self-reported five-point political ideology.
746
+
747
+ 1 - Very liberal
748
+ 2 - Liberal
749
+ 3 - Moderate
750
+ 4 - Conservative
751
+ 5 - Very conservative
752
+ 6 - Not sure
753
+
754
+ --------------------------------------------------------------------------------
755
+ newsint
756
+ --------------------------------------------------------------------------------
757
+
758
+ YouGov demographic question
759
+
760
+ Respondent's self-reported political interest.
761
+
762
+ 1 - Most of the time
763
+ 2 - Some of the time
764
+ 3 - Only now and then
765
+ 4 - Hardly at all
766
+ 7 - Don't know
767
+
768
+ --------------------------------------------------------------------------------
769
+ religpew
770
+ --------------------------------------------------------------------------------
771
+
772
+ YouGov demographic question
773
+
774
+ Respondent's self-reported religious affiliation (Pew Research categories).
775
+
776
+ 1 - Protestant
777
+ 2 - Roman Catholic
778
+ 3 - Mormon
779
+ 4 - Eastern or Greek Orthodox
780
+ 5 - Jewish
781
+ 6 - Muslim
782
+ 7 - Buddhist
783
+ 8 - Hindu
784
+ 9 - Atheist
785
+ 10 - Agnostic
786
+ 11 - Nothing in particular
787
+ 12 - Something else
788
+
789
+ --------------------------------------------------------------------------------
790
+ awareness
791
+ --------------------------------------------------------------------------------
792
+
793
+ Outcome question
794
+
795
+ Whether the respondent has heard of the international treaty to ban nuclear
+ weapons (the TPNW).
796
+
797
+ 1 - Yes, and I support it
798
+ 2 - Yes, and I oppose it
799
+ 3 - No, but it sounds like I would support it
800
+ 4 - No, but it sounds like I would oppose it
801
+ 8 - Skipped
802
+
803
+ ================================================================================
804
+ ORIGINAL INCOME DATA (tpnw_orig_income.csv)
805
+ ================================================================================
806
+
807
+ --------------------------------------------------------------------------------
808
+ income
809
+ --------------------------------------------------------------------------------
810
+
811
+ Demographic question
812
+
813
+ Numeric text-entry variable indicating the respondent's self-reported income;
814
+ converted to a categorical variable to match the income variable from
815
+ tpnw_raw.csv, described above. A brief sketch of the conversion follows.
816
+
817
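+ A minimal sketch of the conversion, with illustrative breakpoints only (the
+ actual cut points are documented in hbg_cleaning.R):
+
+   orig_inc$income <- cut(orig_inc$income, labels = FALSE,
+                          breaks = c(0, 14999, 49999, 99999, Inf))
+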
+ --------------------------------------------------------------------------------
818
+ consent
819
+ --------------------------------------------------------------------------------
820
+
821
+ Custom embedded data field
822
+
823
+ Indicator of whether a respondent consented to participate in the survey.
824
+
825
+ 0 - Respondent did not consent
826
+ 1 - Respondent consented
827
+
828
+ --------------------------------------------------------------------------------
829
+ pid
830
+ --------------------------------------------------------------------------------
831
+
832
+ Dynata embedded data field
833
+
834
+ Numeric variable uniquely identifying a panelist (panelist ID).
835
+
836
+ ================================================================================
837
+ CLEANED EXPERIMENTAL DATA (tpnw_data.csv)
838
+
839
+ Only newly instantiated variables are described below; any recodings of
840
+ variables described above are documented in the replication code cleaning script
841
+ (hbg_cleaning.R) available in ../scripts.
842
+ ================================================================================
843
+
844
+ --------------------------------------------------------------------------------
845
+ female
846
+ --------------------------------------------------------------------------------
847
+
848
+ Demographic question
849
+
850
+ Indicator of whether respondent self-reported female gender.
851
+
852
+ 0 - No
853
+ 1 - Yes
854
+ NA - Other/skipped
855
+
856
+ --------------------------------------------------------------------------------
857
+ age
858
+ --------------------------------------------------------------------------------
859
+
860
+ Demographic question
861
+
862
+ Numeric variable indicating the respondent's age, computed by subtracting the
863
+ self-reported birth year from 2019, the year in which the survey was conducted
+ (2019 - birthyr).
864
+
865
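+ Equivalently, in R (assuming the birth-year column is named birthyr, as in the
+ YouGov data):
+
+   tpnw$age <- 2019 - tpnw$birthyr
+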
+ --------------------------------------------------------------------------------
866
+ northeast
867
+ --------------------------------------------------------------------------------
868
+
869
+ Demographic question
870
+
871
+ Indicator of whether respondent's state is in the Northeast region defined by
872
+ the U.S. Census Bureau.
873
+
874
+ 0 - No
875
+ 1 - Yes
876
+ NA - Skipped
877
+
878
+ --------------------------------------------------------------------------------
879
+ midwest
880
+ --------------------------------------------------------------------------------
881
+
882
+ Demographic question
883
+
884
+ Indicator of whether respondent's state is in the Midwest region defined by the
885
+ U.S. Census Bureau.
886
+
887
+ 0 - No
888
+ 1 - Yes
889
+ NA - Skipped
890
+
891
+ --------------------------------------------------------------------------------
892
+ south
893
+ --------------------------------------------------------------------------------
894
+
895
+ Demographic question
896
+
897
+ Indicator of whether respondent's state is in the South region defined by the
898
+ U.S. Census Bureau.
899
+
900
+ 0 - No
901
+ 1 - Yes
902
+ NA - Skipped
903
+
904
+ --------------------------------------------------------------------------------
905
+ west
906
+ --------------------------------------------------------------------------------
907
+
908
+ Demographic question
909
+
910
+ Indicator of whether respondent's state is in the West region defined by the
911
+ U.S. Census Bureau.
912
+
913
+ 0 - No
914
+ 1 - Yes
915
+ NA - Skipped
916
+
917
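+ A sketch of how one such regional indicator can be built from FIPS state codes
+ (state is a stand-in column name; Census Northeast: CT, ME, MA, NH, NJ, NY,
+ PA, RI, VT):
+
+   ne_fips <- c(9, 23, 25, 33, 34, 36, 42, 44, 50)
+   tpnw$northeast <- ifelse(is.na(tpnw$state), NA,
+                            as.integer(tpnw$state %in% ne_fips))
+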
+ --------------------------------------------------------------------------------
918
+ caseid
919
+ --------------------------------------------------------------------------------
920
+
921
+ Weighting variable
922
+
923
+ Unique identifier for each respondent for the purposes of computing raked
924
+ post-stratification weights with anesrake.
925
+
926
+ --------------------------------------------------------------------------------
927
+ age_wtng
928
+ --------------------------------------------------------------------------------
929
+
930
+ Weighting variable
931
+
932
+ Coarsened and factorized age variable for the purposes of computing raked
933
+ post-stratification weights with anesrake.
934
+
935
+ age1824 - Respondent is in the 18-24-year-old age group
936
+ age2534 - Respondent is in the 25-34-year-old age group
937
+ age3544 - Respondent is in the 35-44-year-old age group
938
+ age4554 - Respondent is in the 45-54-year-old age group
939
+ age5564 - Respondent is in the 55-64-year-old age group
940
+ age6599 - Respondent is in the 65-99-year-old age group
941
+
942
+ --------------------------------------------------------------------------------
943
+ female_wtng
944
+ --------------------------------------------------------------------------------
945
+
946
+ Weighting variable
947
+
948
+ Factorized female variable for the purposes of computing raked
949
+ post-stratification weights with anesrake.
950
+
951
+ female - Respondent is female
952
+ na - Skipped/Other
953
+ male - Respondent is male
954
+
955
+ --------------------------------------------------------------------------------
956
+ northeast_wtng
957
+ --------------------------------------------------------------------------------
958
+
959
+ Weighting variable
960
+
961
+ Factorized northeast variable for the purposes of computing raked
962
+ post-stratification weights with anesrake.
963
+
964
+ northeast - Respondent is from the Northeast
965
+ other - Respondent is from another region
966
+
967
+ --------------------------------------------------------------------------------
968
+ midwest_wtng
969
+ --------------------------------------------------------------------------------
970
+
971
+ Weighting variable
972
+
973
+ Factorized midwest variable for the purposes of computing raked
974
+ post-stratification weights with anesrake.
975
+
976
+ midwest - Respondent is from the Midwest
977
+ other - Respondent is from another region
978
+
979
+ --------------------------------------------------------------------------------
980
+ south_wtng
981
+ --------------------------------------------------------------------------------
982
+
983
+ Weighting variable
984
+
985
+ Factorized south variable for the purposes of computing raked
986
+ post-stratification weights with anesrake.
987
+
988
+ south - Respondent is from the South
989
+ other - Respondent is from another region
990
+
991
+ --------------------------------------------------------------------------------
992
+ west_wtng
993
+ --------------------------------------------------------------------------------
994
+
995
+ Weighting variable
996
+
997
+ Factorized west variable for the purposes of computing raked
998
+ post-stratification weights with anesrake.
999
+
1000
+ west - Respondent is from the West
1001
+ other - Respondent is from another region
1002
+
1003
+ --------------------------------------------------------------------------------
1004
+ anesrake_weight
1005
+ --------------------------------------------------------------------------------
1006
+
1007
+ Custom weighting variable
1008
+
1009
+ Raked post-stratification weights computed with anesrake.
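+
+ A minimal sketch of how the weighting variables above feed into anesrake; the
+ target proportions here are placeholders, not the margins actually used:
+
+   library(anesrake)
+   targets <- list(northeast_wtng = c(northeast = .17, other = .83),
+                   south_wtng     = c(south = .38, other = .62))
+   tpnw[names(targets)] <- lapply(tpnw[names(targets)], factor)
+   rk <- anesrake(targets, tpnw, caseid = tpnw$caseid)
+   tpnw$anesrake_weight <- rk$weightvec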
1/replication-package/meta/hbg_instrument.pdf ADDED
Binary file (132 kB).
 
1/replication-package/meta/hbg_pap.pdf ADDED
Binary file (264 kB).
 
1/replication-package/scripts/hbg_analysis.R ADDED
@@ -0,0 +1,1033 @@
1
+ ### Initialize workspace.
2
+ ## Clear workspace.
3
+ rm(list = ls(all = TRUE))
4
+
5
+ ## Set working directory.
6
+ setwd("~/Downloads/hbg_replication")
7
+
8
+ ## Set seed.
9
+ set.seed(123)
10
+
11
+ ## Set number of iterations for bootstrap replication (note: the replicate()
+ ## calls below hard-code 10000 rather than referencing n_iter).
12
+ n_iter <- 10000
13
+
14
+ ## Load relevant packages.
15
+ library(sandwich)
16
+ library(car)
17
+
18
+ ## Load relevant helper functions.
19
+ source("scripts/helper_functions.R")
20
+
21
+ ## Load data.
22
+ # Load experimental data.
23
+ tpnw <- read.csv("data/tpnw_data.csv", row.names = 1,
24
+ stringsAsFactors = FALSE)
25
+
26
+ # Load YouGov data.
27
+ aware <- read.csv("data/tpnw_aware.csv", row.names = 1,
28
+ stringsAsFactors = FALSE)
29
+
30
+ ### Define relevant objects.
31
+ ## Define objects specifying outcomes.
32
+ # Specify join_tpnw object, representing main outcome.
33
+ join_tpnw <- "join_tpnw"
34
+
35
+ # Specify tpnw_atts object, representing attitudinal outcomes.
36
+ tpnw_atts <- names(tpnw)[startsWith(names(tpnw), "tpnw_atts")]
37
+
38
+ # Specify all_outs object, concatenating main and attitudinal outcomes.
39
+ all_outs <- c(join_tpnw, tpnw_atts)
40
+
41
+ ## Define objects specifying predictors.
42
+ # Define object specifying main treatments.
43
+ treats <- c("group_cue", "security_cue", "norms_cue", "institutions_cue")
44
+
45
+ # Define object specifying general demographics.
46
+ demos <- c("age", "female", "midwest", "west", "south", "income", "educ")
47
+
48
+ # Define object specifying politically relevant demographics.
49
+ pol_demos <- c("ideo", "pid3")
50
+
51
+ # Define list of conditioning sets (NULL corresponds to Model 1, whereas the use
52
+ # of demographic and political covariates corresponds to Model 2).
53
+ covars <- list(NULL, c(demos, pol_demos))
54
+
55
+ ### Produce analysis.
56
+ ## Produce balance table.
57
+ # Specify covariates to be used for balance table.
58
+ bal_covars <- c("age", "female", "northeast", "midwest", "west",
59
+ "south", "income", "educ", "ideo", "pid3")
60
+
61
+ # Produce balance table matrix output, looping over treatment group.
62
+ bal_mat <- lapply(0:4, function (i) {
63
+ # For each treatment value ...
64
+ apply(tpnw[bal_covars][tpnw$treatment == i,], 2, function (x) {
65
+
66
+ # Calculate the mean of each covariate.
67
+ mean_x <- mean(x)
68
+
69
+ # Calculate SE estimates using 10,000 bootstrap replicates.
70
+ sd_x <- sd(replicate(10000, {
71
+ samp <- x[sample(length(x), replace = TRUE)]
72
+ return(mean(samp))
73
+ }))
74
+
75
+ # Return a list containing both point estimates.
76
+ return(list(mean = mean_x, sd = sd_x))
77
+ })
78
+ })
79
+
80
+ # Bind point estimates for each treatment group.
81
+ bal_mat <- lapply(bal_mat, function (treat) {
82
+ do.call("rbind", unlist(treat, recursive = FALSE))
83
+ })
84
+
85
+ # Convert list into a matrix, with columns representing treatment group.
86
+ bal_mat <- do.call("cbind", bal_mat)
87
+
88
+ # Round all estimates to within three decimal points and convert to character
89
+ # for the purposes of producing tabular output.
90
+ bal_tab <- apply(bal_mat, 2, function (x) format(round(x, 3), digits = 3))
91
+
92
+ # Specify rows containing mean point estimates.
93
+ mean_rows <- endsWith(rownames(bal_tab), ".mean")
94
+
95
+ # Specify rows containing SE point estimates.
96
+ se_rows <- endsWith(rownames(bal_tab), ".sd")
97
+
98
+ # Reformat SE estimates to be within parentheses.
99
+ bal_tab[se_rows,] <- apply(bal_tab[se_rows,], 2, function (x) {
100
+ paste0("(", x, ")")
101
+ })
102
+
103
+ # Remove row names for rows with SE estimates.
104
+ rownames(bal_tab)[se_rows] <- ""
105
+
106
+ # Remove ".mean" string in row names for rows with mean estimates.
107
+ rownames(bal_tab)[mean_rows] <- gsub(".mean", "", rownames(bal_tab)[mean_rows])
108
+
109
+ # Concatenate data to comport with LaTeX tabular markup.
110
+ bal_tab <- paste(paste(paste(
111
+ capwords(rownames(bal_tab)), apply(bal_tab, 1, function (x) {
112
+ paste(x, collapse = " & ")
113
+ }),
114
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
115
+ bal_tab <- gsub("\\( ", "\\(", bal_tab)
116
+
117
+ # Produce tabular output.
118
+ sink("output/balance_tab.tex")
119
+ cat("\\begin{table}\n",
120
+ "\\caption{Covariate Balance Across Treatment Arms}\n",
121
+ "\\centering\\small\n",
122
+ "\\sisetup{\n",
123
+ "\tdetect-all,\n",
124
+ "\ttable-number-alignment = center,\n",
125
+ "\ttable-figures-integer = 1,\n",
126
+ "\ttable-figures-decimal = 3,\n",
127
+ "\tinput-symbols = {()}\n",
128
+ "}\n",
129
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{2.75cm}*{5}",
130
+ "{S[table-number-alignment = center, table-column-width = 1.75cm]}}\n"),
131
+ "\\toprule\n",
132
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
133
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
134
+ bal_tab,
135
+ "\\bottomrule\n",
136
+ "\\end{tabular}\n",
137
+ "\\end{table}\n")
138
+ sink()
139
+
140
+ ## Produce main results.
141
+ # Compute main results, looping over conditioning sets.
142
+ main_results <- lapply(covars, function (covar) {
143
+ # For each conditioning set ...
144
+ # Specify the relevant regression formula.
145
+ form <- as.formula(paste(join_tpnw, paste(c(treats, covar),
146
+ collapse = " + "), sep = " ~ "))
147
+
148
+ # Fit the OLS model per the specification.
149
+ fit <- lm(form, data = tpnw)
150
+
151
+ # Compute HC2 robust standard errors.
152
+ ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
153
+
154
+ # Bind coefficient and SE output.
155
+ reg_out <- cbind(fit$coef[2:5], ses[2:5])
156
+
157
+ # Name output matrix columns and rows.
158
+ colnames(reg_out) <- c("coef", "se")
159
+ rownames(reg_out) <- treats
160
+
161
+ # Return output
162
+ return(as.data.frame(reg_out))
163
+ })
164
+
165
+ # Name results to distinguish between Model 1 and Model 2 estimates.
166
+ names(main_results) <- c("model_1", "model_2")
167
+
168
+ ## Assess significance of effect estimates and differences.
169
+ # Estimate Bonferroni-Holm-adjusted p-values (one-tailed; lower.tail = TRUE
+ # reflects the expected negative effects).
170
+ bf_ps <- lapply(main_results, function (x) {
171
+ round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
172
+ method = "holm"), 3)
173
+ })
174
+
175
+ # Estimate FDR-adjusted p-values, as an added robustness check.
176
+ fdr_ps <- lapply(main_results, function (x) {
177
+ round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
178
+ method = "fdr"), 3)
179
+ })
180
+
181
+ # Redefine the main model (Model 2), and store full VCOV matrix.
182
+ main_model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
183
+ institutions_cue + age + female + midwest +
184
+ west + south + income + educ + ideo + pid3, tpnw)
185
+ main_vcov <- vcovHC(main_model, "HC2")
186
+
187
+ # Specify diff_sig function for assessing significance between two effect
188
+ # estimates (defined here for the sake of clarity).
189
+ diff_sig <- function (eff_1, eff_2) {
190
+ diff <- main_model$coef[eff_1] - main_model$coef[eff_2]
191
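+ # SE of the difference follows Var(a - b) = Var(a) + Var(b) - 2 * Cov(a, b),
+ # using the HC2 covariance matrix stored in main_vcov above.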
+ se <- sqrt(main_vcov[eff_1, eff_1] + main_vcov[eff_2, eff_2] -
192
+ 2 * main_vcov[eff_1, eff_2])
193
+ p <- 2 * (1 - pnorm(abs(diff) / se))
194
+ return (p)
195
+ }
196
+
197
+ # Assess the significance of the difference between institutions and security
198
+ # cue effect estimates.
199
+ inst_sec_diff_p <- diff_sig("institutions_cue", "security_cue")
200
+
201
+ # Assess the significance of the difference between institutions and group cue
202
+ # effect estimates.
203
+ inst_grp_diff_p <- diff_sig("institutions_cue", "group_cue")
204
+
205
+ # Assess the significance of the difference between security and group cue
206
+ # effect estimates.
207
+ sec_grp_diff_p <- diff_sig("security_cue", "group_cue")
208
+
209
+ # Assess the significance of the difference between security and norms cue
210
+ # effect estimates.
211
+ sec_norms_diff_p <- diff_sig("security_cue", "norms_cue")
212
+
213
+ # Assess the significance of the difference between institutions and norms cue
214
+ # effect estimates.
215
+ inst_norms_diff_p <- diff_sig("institutions_cue", "norms_cue")
216
+
217
+ # Assess the significance of the difference between group and norms cue
218
+ # effect estimates.
219
+ grp_norms_diff_p <- diff_sig("group_cue", "norms_cue")
220
+
221
+ # The significance of differences between effect estimates was also assessed
222
+ # using 10,000 bootstrap replicates and two-tailed p-values; relevant code is
223
+ # included below with the institutions and security cues, for posterity, but is
224
+ # not run.
225
+
226
+ # Compute SE estimates.
227
+ # diffs <- replicate(10000, {
228
+ # samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
229
+ # model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
230
+ # institutions_cue + age + female + midwest +
231
+ # west + south + income + educ + ideo + pid3, samp)
232
+ # model$coef[5] - model$coef[3]
233
+ # })
234
+ # diffs_se <- sd(diffs)
235
+ #
236
+ # # Fit model.
237
+ # model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
238
+ # institutions_cue + age + female + midwest +
239
+ # west + south + income + educ + ideo + pid3, tpnw)
240
+ #
241
+ # # Compute two-tailed p-value.
242
+ # 2 * (1 - pnorm(abs((model$coef[5] - model$coef[3])/diffs_se)))
243
+
244
+ ## Assess YouGov results.
245
+ # Tabulate responses.
246
+ aware_table <- table(aware$awareness, useNA = "ifany")
247
+ names(aware_table) <- c("Yes, support", "Yes, oppose",
248
+ "No, support", "No, oppose", "Skipped")
249
+
250
+ # Compute both weighted and unweighted means.
251
+ aware_results <- lapply(1:4, function (resp) {
252
+ # Calculate weighted mean.
253
+ wt_mean <- with(aware, weighted.mean(awareness == resp,
254
+ w = weight, na.rm = TRUE))
255
+
256
+ # Calculate raw mean.
257
+ rw_mean <- with(aware, mean(awareness == resp, na.rm = TRUE))
258
+
259
+ # Concatenate means and rename vector.
260
+ means <- c(wt_mean, rw_mean)
261
+ names(means) <- c("weighted_mean", "raw_mean")
262
+
263
+ # Calculate SE estimates with 10,000 bootstrap replicates.
264
+ ses <- replicate(10000, {
265
+ samp <- aware[sample(nrow(aware),
266
+ replace = TRUE),]
267
+ wt_mean <- with(samp, weighted.mean(awareness == resp,
268
+ w = weight, na.rm = TRUE))
269
+ rw_mean <- with(samp, mean(awareness == resp,
270
+ na.rm = TRUE))
271
+ return(c(wt_mean, rw_mean))
272
+ })
273
+ ses <- apply(ses, 1, sd)
274
+ names(ses) <- c("weighted_mean", "raw_mean")
275
+
276
+ # Bind mean and SE estimates.
277
+ outs <- rbind(means, ses)
278
+ rownames(outs) <- paste(names(aware_table)[resp],
279
+ c("mean", "se"), sep = "_")
280
+ return(outs)
281
+ })
282
+
283
+ # Name results to distinguish between responses.
284
+ names(aware_results) <- c("Yes, support", "Yes, oppose",
285
+ "No, support", "No, oppose")
286
+
287
+ ## Assess covariate means for experimental and YouGov data (used in Table A1).
288
+ # Indicate the list of covariates to be assessed.
289
+ demo_tab_vars <- c("age", "female", "northeast", "midwest", "west", "south")
290
+
291
+ # Compute covariate averages for experimental data.
292
+ tpnw_means <- apply(tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
293
+
294
+ # Compute covariate averages for YouGov data.
295
+ aware_means <- apply(aware[demo_tab_vars], 2, function (x) {
296
+ weighted.mean(x, na.rm = TRUE, w = aware$weight)
297
+ })
298
+
299
+ # Compute bootstrap standard errors for demographic means.
300
+ demo_ses <- replicate(10000, {
301
+ # Sample the experimental data.
302
+ samp_tpnw <- tpnw[sample(nrow(tpnw), replace = TRUE), demo_tab_vars]
303
+
304
+ # Sample the YouGov data.
305
+ samp_aware <- aware[sample(nrow(aware), replace = TRUE),
306
+ c(demo_tab_vars, "weight")]
307
+
308
+ # Compute bootstrap means for experimental data.
309
+ tpnw_means <- apply(samp_tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
310
+
311
+ # Compute bootstrap means for YouGov data.
312
+ aware_means <- apply(samp_aware[demo_tab_vars], 2, function (x) {
313
+ weighted.mean(x, na.rm = TRUE, w = samp_aware$weight)
314
+ })
315
+
316
+ # Return the results as a list, and ensure that replicate() also returns a
317
+ # list.
318
+ return(list(tpnw = tpnw_means, aware = aware_means))
319
+ }, simplify = FALSE)
320
+
321
+ # Compute SE estimates for each set of demographics.
322
+ demo_ses <- lapply(c("tpnw", "aware"), function (dataset) {
323
+ # Group all estimates from each dataset.
324
+ sep_res <- lapply(demo_ses, function (iteration) {
325
+ return(iteration[[dataset]])
326
+ })
327
+
328
+ # Bind estimates.
329
+ sep_res <- do.call("rbind", sep_res)
330
+
331
+ # Compute SE estimates.
332
+ sep_ses <- apply(sep_res, 2, sd)
333
+
334
+ # Return SE estimates.
335
+ return(sep_ses)
336
+ })
337
+
338
+ ## Assess responses to the attitudinal battery.
339
+ # Assess responses to the attitudinal battery, looping over treatment group. For
340
+ # each treatment value ...
341
+ att_results <- lapply(0:4, function (i) {
342
+ # Calculate the average response to each attitudinal battery question.
343
+ atts_mean <- apply(tpnw[tpnw$treatment == i, tpnw_atts], 2, function (x) {
344
+ mean(x, na.rm = TRUE)
345
+ })
346
+
347
+ # Calculate SE estimates using 10,000 bootstrap replicates.
348
+ bl_atts_boot <- replicate(10000, {
349
+ dat <- tpnw[tpnw$treatment == i, tpnw_atts]
350
+ samp <- dat[sample(nrow(dat), replace = TRUE),]
351
+ apply(samp, 2, function (x) mean(x, na.rm = TRUE))
352
+ })
353
+ bl_atts_ses <- apply(bl_atts_boot, 1, sd)
354
+
355
+ # Combine mean and SE estimates and return results.
356
+ return(cbind(atts_mean, bl_atts_ses))
357
+ })
358
+
359
+ # Compute treatment effects on responses to the attitudinal battery, looping
360
+ # over conditioning sets.
361
+ att_effs <- lapply(covars, function (covar) {
362
+ # For each conditioning set ...
363
+ model_res <- lapply(tpnw_atts, function (out) {
364
+ # Specify the relevant regression formula.
365
+ form <- as.formula(paste(out,
366
+ paste(c(treats, covar),
367
+ collapse = " + "),
368
+ sep = " ~ "))
369
+
370
+ # Fit the OLS model per the specification.
371
+ fit <- lm(form, data = tpnw)
372
+
373
+ # Compute HC2 robust standard errors.
374
+ ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
375
+
376
+ # Bind coefficient and SE output.
377
+ reg_out <- cbind(fit$coef[2:5], ses[2:5])
378
+
379
+ # Name output matrix columns and rows.
380
+ colnames(reg_out) <- c("coef", "se")
381
+ rownames(reg_out) <- treats
382
+
383
+ # Return output.
384
+ return(as.data.frame(reg_out))
385
+ })
386
+ # Name results to distinguish between each attitudinal battery
387
+ # outcome and return results.
388
+ names(model_res) <- tpnw_atts
389
+ return(model_res)
390
+ })
391
+
392
+ # Name results to distinguish between Model 1 and Model 2 estimates.
393
+ names(att_effs) <- c("model_1", "model_2")
394
+
395
+ ## Perform subgroup analysis.
396
+ # Compute mean support by political party, looping over treatment group.
397
+ pid_results <- lapply(0:4, function (treat) {
398
+ # For each partisan group ...
399
+ out <- lapply(-1:1, function (i) {
400
+ # Calculate average support.
401
+ pid_mean <- with(tpnw,
402
+ mean(join_tpnw[pid3 == i &
403
+ treatment == treat],
404
+ na.rm = TRUE))
405
+
406
+ # Calculate SE estimates with 10,000
407
+ # bootstrap replicates.
408
+ pid_boot <- replicate(10000, {
409
+ dat <- tpnw$join_tpnw[tpnw$pid3 == i &
410
+ tpnw$treatment == treat]
411
+ samp <- dat[sample(length(dat),
412
+ replace = TRUE)]
413
+ mean(samp, na.rm = TRUE)
414
+ })
415
+
416
+ # Concatenate and return mean and SE
417
+ # estimates.
418
+ return(c(mean = pid_mean, se = sd(pid_boot)))
419
+ })
420
+
421
+ # Name results to distinguish estimates by political party,
422
+ # and return output.
423
+ names(out) <- c("dem", "ind", "rep")
424
+ return(as.data.frame(out))
425
+ })
426
+
427
+ # Name results to distinguish between treatment groups.
428
+ names(pid_results) <- c("Control", paste(c("Group", "Security", "Norms",
429
+ "Institutions"), "Cue"))
430
+
431
+ # Assess significance between control-group means; for 10,000 bootstrap
432
+ # replicates ...
433
+ pid_diff_ses <- replicate(10000, {
434
+ # Sample with replacement.
435
+ samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
436
+
437
+ # Compute the difference between Democrats' and
438
+ # Independents' support.
439
+ dem_ind_diff <- with(samp[samp$treatment == 0,],
440
+ mean(join_tpnw[pid3 == -1],
441
+ na.rm = TRUE) -
442
+ mean(join_tpnw[pid3 == 0],
443
+ na.rm = TRUE))
444
+ # Compute the difference between Democrats' and
445
+ # Republicans' support.
446
+ dem_rep_diff <- with(samp[samp$treatment == 0,],
447
+ mean(join_tpnw[pid3 == -1],
448
+ na.rm = TRUE) -
449
+ mean(join_tpnw[pid3 == 1],
450
+ na.rm = TRUE))
451
+ # Compute the difference between Republicans' and
452
+ # Independents' support.
453
+ ind_rep_diff <- with(samp[samp$treatment == 0,],
454
+ mean(join_tpnw[pid3 == 1],
455
+ na.rm = TRUE) -
456
+ mean(join_tpnw[pid3 == 0],
457
+ na.rm = TRUE))
458
+
459
+ # Concatenate and name results.
460
+ out <- c(dem_ind_diff, dem_rep_diff, ind_rep_diff)
461
+ names(out) <- c("dem_ind", "dem_rep", "ind_rep")
462
+ return(out)
463
+ })
464
+
465
+ # Compute SE estimates for each difference.
466
+ pid_diff_ses <- apply(pid_diff_ses, 1, sd)
467
+
468
+ # Assess significance for each difference.
469
+ dem_ind_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
470
+ pid_results$Control["mean", "ind"]) / pid_diff_ses["dem_ind"]))
471
+ dem_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
472
+ pid_results$Control["mean", "rep"]) / pid_diff_ses["dem_rep"]))
473
+ ind_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "ind"] -
474
+ pid_results$Control["mean", "rep"]) / pid_diff_ses["ind_rep"]))
475
+
476
+ # Compute mean support by political ideology, looping over treatment group.
477
+ # Coarsen ideology into a new column so the numeric ideo covariate stays
+ # intact for the weighted models fit below.
+ tpnw$ideo3 <- recode(tpnw$ideo, "c(-2, -1) = 'liberal';
478
+ 0 = 'moderate';
479
+ c(1, 2) = 'conservative'")
480
+ ideo_results <- lapply(0:4, function (treat) {
481
+ # For each ideological group ...
482
+ out <- lapply(c("liberal", "moderate", "conservative"), function (i) {
483
+ # Calculate average support.
484
+ pid_mean <- with(tpnw,
485
+ mean(join_tpnw[ideo3 == i &
486
+ treatment == treat],
487
+ na.rm = TRUE))
488
+
489
+ # Calculate SE estimates with 10,000
490
+ # bootstrap replicates.
491
+ pid_boot <- replicate(10000, {
492
+ dat <- tpnw$join_tpnw[tpnw$ideo3 == i &
493
+ tpnw$treatment == treat]
494
+ samp <- dat[sample(length(dat),
495
+ replace = TRUE)]
496
+ mean(samp, na.rm = TRUE)
497
+ })
498
+
499
+ # Concatenate and return mean and SE
500
+ # estimates.
501
+ return(c(mean = pid_mean, se = sd(pid_boot)))
502
+ })
503
+
504
+ # Name results to distinguish estimates by political ideology,
505
+ # and return output.
506
+ names(out) <- c("liberal", "moderate", "conservative")
507
+ return(as.data.frame(out))
508
+ })
509
+
510
+ # Name results to distinguish between treatment groups.
511
+ names(ideo_results) <- c("Control", paste(c("Group", "Security", "Norms",
512
+ "Institutions"), "Cue"))
513
+
514
+ ## Produce weighted main results.
515
+ # Compute weighted main results, looping over conditioning sets.
516
+ w_main_results <- lapply(covars, function (covar) {
517
+ # For each conditioning set ...
518
+ # Specify the relevant regression formula.
519
+ form <- as.formula(paste(join_tpnw, paste(c(treats, covar),
520
+ collapse = " + "), sep = " ~ "))
521
+
522
+ # Fit the OLS model per the specification.
523
+ fit <- lm(form, data = tpnw, weights = anesrake_weight)
524
+
525
+ # Compute HC2 robust standard errors.
526
+ ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
527
+
528
+ # Bind coefficient and SE output.
529
+ reg_out <- cbind(fit$coef[2:5], ses[2:5])
530
+
531
+ # Name output matrix columns and rows.
532
+ colnames(reg_out) <- c("coef", "se")
533
+ rownames(reg_out) <- treats
534
+
535
+ # Return output
536
+ return(as.data.frame(reg_out))
537
+ })
538
+
539
+ # Name results to distinguish between Model 1 and Model 2 estimates.
540
+ names(w_main_results) <- c("model_1", "model_2")
541
+
542
+ ### Produce plots and tables.
543
+ ## Produce main results plot.
544
+ # Produce main results matrix for plotting.
545
+ main_mat <- do.call("rbind", lapply(1:2, function (model) {
546
+ cbind(main_results[[model]], model)
547
+ }))
548
+
549
+ # Store values for constructing 90- and 95-percent CIs.
550
+ z_90 <- qnorm(.95)
551
+ z_95 <- qnorm(.975)
552
+
553
+ # Open a new EPS (postscript) graphics device.
554
+ setEPS()
555
+ postscript("output/fg1.eps", width = 8, height = 5.5)
556
+
557
+ # Define custom graphical parameters.
558
+ par(mar = c(8, 7, 2, 2))
559
+
560
+ # Open new, empty plot.
561
+ plot(0, type = "n", axes = FALSE, ann = FALSE,
562
+ xlim = c(-.3, .05), ylim = c(.8, 4))
563
+
564
+ # Produce guidelines to go behind point estimates and error bars ([-7] skips
+ # zero, which receives its own dashed line below).
565
+ abline(v = seq(-.3, .05, .05)[-7], col = "lightgrey", lty = 3)
566
+
567
+ # Add Model 1 point estimates.
568
+ par(new = TRUE)
569
+ plot(x = main_mat$coef[main_mat$model == 1], y = 1:4 + .05,
570
+ xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "steelblue2",
571
+ xlab = "", ylab = "", axes = FALSE)
572
+
573
+ # Add Model 2 point estimates.
574
+ par(new = TRUE)
575
+ plot(x = main_mat$coef[main_mat$model == 2], y = 1:4 - .05,
576
+ xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "#FF8F37", main = "",
577
+ xlab = "", ylab = "", axes = FALSE)
578
+
579
+ # Add horizontal axis indicating effect estimate size.
580
+ axis(side = 1, at = round(seq(-.3, 0, .05), 2), labels = FALSE)
581
+ mtext(side = 1, at = seq(-.3, 0, .1), text = c("-30", "-20", "-10", "0"),
582
+ cex = .9, line = .75)
583
+ axis(side = 1, at = round(seq(-.25, .05, .05), 2), tck = -.01, labels = FALSE)
584
+
585
+ # Add vertical axis specifying treatment names corresponding to point estimates.
586
+ axis(side = 2, at = 1:4, labels = FALSE)
587
+ mtext(side = 2, line = .75, at = 1:4,
588
+ text = paste(c("Group", "Security", "Norms", "Institutions"), "Cue"),
589
+ las = 1, padj = .35, cex = .9)
590
+
591
+ # Add axis labels.
592
+ mtext(side = 2, line = 2.3, at = 4.2, text = "Treatment",
593
+ font = 2, las = 1, xpd = TRUE)
594
+ mtext(side = 1, text = "Estimated Effect Size", line = 2.5, at = -.15, font = 2)
595
+
596
+ # Add a dashed line at zero.
597
+ abline(v = 0.00, lty = 2)
598
+
599
+ # Add two-sided, 90-percent CIs.
600
+ with(main_mat[main_mat$model == 1,],
601
+ segments(x0 = coef - z_90 * se, y0 = 1:4 + .05, x1 = coef + z_90 * se,
602
+ y1 = 1:4 + .05, col = "steelblue2", lwd = 3))
603
+ with(main_mat[main_mat$model == 2,],
604
+ segments(x0 = coef - z_90 * se, y0 = 1:4 - .05, x1 = coef + z_90 * se,
605
+ y1 = 1:4 - .05, col = "#FF8F37", lwd = 3))
606
+
607
+ # Add two-sided 95-percent CIs.
608
+ with(main_mat[main_mat$model == 1,],
609
+ segments(x0 = coef - z_95 *se, y0 = 1:4 + .05, x1 = coef + z_95 *se,
610
+ y1 = 1:4 + .05, col = "steelblue2", lwd = 1))
611
+ with(main_mat[main_mat$model == 2,],
612
+ segments(x0 = coef - z_95 *se, y0 = 1:4 - .05, x1 = coef + z_95 *se,
613
+ y1 = 1:4 - .05, col = "#FF8F37", lwd = 1))
614
+
615
+ # Add legend.
616
+ legend(legend = paste("Model", 1:2), x = -.15, y = -.275, horiz = TRUE,
617
+ pch = 16, col = c("steelblue2", "#FF8F37"), xjust = .5, xpd = TRUE,
618
+ text.width = .05, cex = .9)
619
+
620
+ # Draw a box around the plot.
621
+ box()
622
+
623
+ # Close the graphical device.
624
+ dev.off()
625
+
626
+ ## Create tabular output for main results.
627
+ # Define matrix object of main results.
628
+ tab_dat <- do.call("cbind", main_results)
629
+
630
+ # Compute control-group means, with SE estimates; define OLS formula.
631
+ ctrl_form <- as.formula(paste(join_tpnw, paste(treats,
632
+ collapse = " + "), sep = " ~ "))
633
+
634
+ # Fit the OLS model per the specification and recover the control mean.
635
+ ctrl_fit <- lm(ctrl_form, data = tpnw)
636
+
637
+ # Recover the control-group mean.
638
+ ctrl_mean <- ctrl_fit$coef["(Intercept)"]
639
+
640
+ # Compute control SE.
641
+ ctrl_se <- sqrt(diag(vcovHC(ctrl_fit, "HC2")))["(Intercept)"]
642
+
643
+ # Concatenate mean and SE output with blank values for Model 2.
644
+ ctrl_results <- c(format(round(c(ctrl_mean, ctrl_se), 3) * 100, digits = 2),
645
+ "|", "|")
646
+
647
+ # Reformat data to include a decimal point.
648
+ tab_dat <- apply(tab_dat, 2, function (y) format(round(y, 3) * 100, digits = 2))
649
+
650
+ # Bind control-group means with main results data.
651
+ tab <- rbind(ctrl_results, tab_dat)
652
+
653
+ # Rename row containing control-group means.
654
+ rownames(tab)[which(rownames(tab) == "ctrl_results")] <- "control_mean"
655
+
656
+ # Identify coefficient columns.
657
+ coef_cols <- grep("coef$", colnames(tab))
658
+
659
+ # Identify SE columns.
660
+ se_cols <- grep("se$", colnames(tab))
661
+
662
+ # Reformat SE estimates to be within parentheses.
663
+ tab[,se_cols] <- apply(tab[, se_cols], 2, function (y) paste0("(", y, ")"))
664
+
665
+ # Concatenate data to comport with LaTeX tabular markup.
666
+ tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
667
+ apply(tab, 1, function (x) {
668
+ paste(x, collapse = " & ")
669
+ }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
670
+
671
+ # Produce tabular output.
672
+ sink("output/main_results_tab.tex")
673
+ cat("\\begin{table}\n",
674
+ "\\caption{Estimated Treatment Effects on Support for TPNW}\n",
675
+ "\\begin{adjustbox}{width = \\textwidth, center}\n",
676
+ "\\sisetup{\n",
677
+ "\tdetect-all,\n",
678
+ "\ttable-number-alignment = center,\n",
679
+ "\ttable-figures-integer = 1,\n",
680
+ "\ttable-figures-decimal = 3,\n",
681
+ "\ttable-space-text-post = *,\n",
682
+ "\tinput-symbols = {()}\n",
683
+ "}\n",
684
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
685
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
686
+ "\\toprule\n",
687
+ "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
688
+ "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
689
+ tab,
690
+ "\\bottomrule\n",
691
+ "\\end{tabular}\n",
692
+ "\\end{adjustbox}\n",
693
+ "\\end{table}\n")
694
+ sink()
695
+
696
+ ## Create tabular output for YouGov results.
697
+ # Restructure data as a matrix.
698
+ aware_tab <- do.call("rbind", aware_results)
699
+
700
+ # Reformat data to include three decimal points.
701
+ aware_tab <- apply(aware_tab, 2, function (y) format(round(y, 3) * 100,
702
+ digits = 3))
703
+
704
+ # Identify mean rows.
705
+ mean_rows <- endsWith(rownames(aware_tab), "mean")
706
+
707
+ # Identify SE rows.
708
+ se_rows <- endsWith(rownames(aware_tab), "se")
709
+
710
+ # Reformat SE estimates to be within parentheses.
711
+ aware_tab[se_rows,] <- paste0("(", aware_tab[se_rows,], ")")
712
+
713
+ # Remove row names for rows with SE estimates.
714
+ rownames(aware_tab)[se_rows] <- ""
715
+
716
+ # Remove "_mean" indication in mean_rows.
717
+ rownames(aware_tab)[mean_rows] <- gsub("_mean", "",
718
+ rownames(aware_tab)[mean_rows])
719
+
720
+ # Add a placeholder row marking that skipped responses are excluded from the
721
+ # calculations above, and rename the relevant row.
722
+ aware_tab <- rbind(aware_tab, c("|", "|"))
723
+ rownames(aware_tab)[nrow(aware_tab)] <- "Skipped"
724
+
725
+ # Add an empty column to the table, and insert the count column at the relevant
726
+ # indices.
728
+ aware_tab <- cbind(aware_tab, "")
729
+ colnames(aware_tab)[ncol(aware_tab)] <- "N"
730
+ aware_tab[which(rownames(aware_tab) %in% names(aware_table)), "N"] <- aware_table
731
+
732
+ # Concatenate data to comport with LaTeX tabular markup.
733
+ aware_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(aware_tab))),
734
+ apply(aware_tab, 1, function (x) {
735
+ paste(x, collapse = " & ")
736
+ }),
737
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
738
+
739
+ # Produce tabular output.
740
+ sink("output/yougov_tab.tex")
741
+ cat("\\begin{table}\n",
742
+ "\\caption{YouGov Survey Responses}\n",
743
+ "\\centering\\small\n",
744
+ "\\sisetup{\n",
745
+ "\tdetect-all,\n",
746
+ "\ttable-number-alignment = center,\n",
747
+ "\ttable-figures-integer = 1,\n",
748
+ "\ttable-figures-decimal = 3,\n",
749
+ "\tinput-symbols = {()}\n",
750
+ "}\n",
751
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
752
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
753
+ "\\toprule\n",
754
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
755
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
756
+ aware_tab,
757
+ "\\bottomrule\n",
758
+ "\\end{tabular}\n",
759
+ "\\end{table}\n")
760
+ sink()
761
+
762
+ ## Create tabular output for attitudinal results.
763
+ # Define matrix object of main results.
764
+ tab_dat <- do.call("cbind", att_results)
765
+
766
+ # Reformat matrix to alternate mean and SE estimates.
767
+ tab <- sapply(seq(0, 8, 2), function (i) {
768
+ matrix(c(t(tab_dat[,1:2 + i])), 14, 1)
769
+ })
770
+
771
+ # Reformat data to include three decimal points.
772
+ tab <- apply(tab, 2, function (y) format(round(y, 3), digits = 3))
773
+
774
+ # Rename rows to indicate mean and SE estimates.
775
+ rownames(tab) <- paste(rep(rownames(tab_dat), each = 2),
776
+ c("mean", "se"), sep = "_")
777
+
778
+ # Identify mean rows.
779
+ mean_rows <- grep("_mean", rownames(tab))
780
+
781
+ # Identify SE rows.
782
+ se_rows <- grep("_se", rownames(tab))
783
+
784
+ # Reformat SE estimates to be within parentheses.
785
+ tab[se_rows,] <- apply(tab[se_rows,], 1, function (y) {
786
+ paste0("(", gsub(" ", "", y), ")")
787
+ })
788
+
789
+ # Rename rows to improve tabular labels; remove "tpnw_atts, "mean," and "se" row
790
+ # name strings.
791
+ rownames(tab) <- gsub("tpnw_atts|mean$|se$", "", rownames(tab))
792
+
793
+ # Remove leading and tailing underscores.
794
+ rownames(tab) <- gsub("^_|_$", "", rownames(tab))
795
+
796
+ # Remove row names for rows with SE estimates.
797
+ rownames(tab)[se_rows] <- ""
798
+
799
+ # Concatenate data to comport with LaTeX tabular markup.
800
+ tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
801
+ apply(tab, 1, function (x) {
802
+ paste(x, collapse = " & ")
803
+ }),
804
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
805
+
806
+ # Produce tabular output.
807
+ sink("output/atts_tab.tex")
808
+ cat("\\begin{table}\n",
809
+ "\\caption{Attitudes Toward Nuclear Weapons by Arm}\n",
810
+ "\\centering\\small\n",
811
+ "\\sisetup{\n",
812
+ "\tdetect-all,\n",
813
+ "\ttable-number-alignment = center,\n",
814
+ "\ttable-figures-integer = 1,\n",
815
+ "\ttable-figures-decimal = 3,\n",
816
+ "\ttable-space-text-post = *,\n",
817
+ "\tinput-symbols = {()}\n",
818
+ "}\n",
819
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
820
+ "{S[table-number-alignment = center, table-column-width=1.25cm]}}\n"),
821
+ "\\toprule\n",
822
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
823
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
824
+ tab,
825
+ "\\bottomrule\n",
826
+ "\\end{tabular}\n",
827
+ "\\end{table}\n")
828
+ sink()
829
+
830
+ ## Create tabular output for results by political party.
831
+ # Restructure data such that mean and SE estimates are alternating rows in a
832
+ # 1 x 6 matrix, in each of five list elements, corresponding to each treatment
833
+ # group; and bind the results for each treatment group.
834
+ pid_tab <- lapply(pid_results, function (x) {
835
+ matrix(unlist(x), nrow = 6, ncol = 1)
836
+ })
837
+ pid_tab <- do.call("cbind", pid_tab)
838
+
839
+ # Assign row names to distinguish results for each partisan group, and mean and
840
+ # SE estimates.
841
+ rownames(pid_tab) <- paste(rep(c("democrat", "independent", "republican"),
842
+ each = 2), c("mean", "se"))
843
+
844
+ # Identify mean rows.
845
+ mean_rows <- endsWith(rownames(pid_tab), "mean")
846
+
847
+ # Identify SE rows.
848
+ se_rows <- endsWith(rownames(pid_tab), "se")
849
+
850
+ # Label columns per treatment, for the computation of ATEs.
851
+ colnames(pid_tab) <- c("control", treats)
852
+
853
+ # Compute ATEs, with control as baseline, and update tabular data.
854
+ pid_tab[mean_rows, treats] <- pid_tab[mean_rows, treats] -
855
+ pid_tab[mean_rows, "control"]
856
+
857
+ # Reformat data to include three decimal points.
858
+ pid_tab <- apply(pid_tab, 2, function (y) format(round(y, 3) * 100, digits = 3))
859
+
860
+ # Remove extraneous spacing.
861
+ pid_tab <- gsub(" ", "", pid_tab)
862
+
863
+ # Reformat SE estimates to be within parentheses.
864
+ pid_tab[se_rows,] <- paste0("(", pid_tab[se_rows,], ")")
865
+
866
+ # Remove row names for rows with SE estimates.
867
+ rownames(pid_tab)[se_rows] <- ""
868
+
869
+ # Concatenate data to comport with LaTeX tabular markup.
870
+ pid_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(pid_tab))),
871
+ apply(pid_tab, 1, function (x) {
872
+ paste(x, collapse = " & ")
873
+ }),
874
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
875
+
876
+ # Produce tabular output.
877
+ sink("output/pid_support.tex")
878
+ cat("\\begin{table}\n",
879
+ "\\caption{Support for Joining TPNW by Party ID}\n",
880
+ "\\centering\\small\n",
881
+ "\\sisetup{\n",
882
+ "\tdetect-all,\n",
883
+ "\ttable-number-alignment = center,\n",
884
+ "\ttable-figures-integer = 1,\n",
885
+ "\ttable-figures-decimal = 3,\n",
886
+ "\tinput-symbols = {()}\n",
887
+ "}\n",
888
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
889
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
890
+ "\\toprule\n",
891
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
892
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
893
+ pid_tab,
894
+ "\\bottomrule\n",
895
+ "\\end{tabular}\n",
896
+ "\\end{table}\n")
897
+ sink()
898
+
899
+ ## Create tabular output for results by political ideology.
900
+ # Restructure data such that mean and SE estimates are alternating rows in a
901
+ # 1 x 6 matrix, in each of five list elements, corresponding to each treatment
902
+ # group; and bind the results for each treatment group.
903
+ ideo_tab <- lapply(ideo_results, function (x) {
904
+ matrix(unlist(x), nrow = 6, ncol = 1)
905
+ })
906
+ ideo_tab <- do.call("cbind", ideo_tab)
907
+
908
+ # Assign row names to distinguish results for each ideological group, and mean
909
+ # and SE estimates.
910
+ rownames(ideo_tab) <- paste(rep(c("liberal", "moderate", "conservative"),
911
+ each = 2), c("mean", "se"))
912
+
913
+ # Reformat data to include three decimal points.
914
+ ideo_tab <- apply(ideo_tab, 2, function (y) format(round(y, 3) * 100,
915
+ digits = 3))
916
+
917
+ # Identify mean rows.
918
+ mean_rows <- endsWith(rownames(ideo_tab), "mean")
919
+
920
+ # Identify SE rows.
921
+ se_rows <- endsWith(rownames(ideo_tab), "se")
922
+
923
+ # Reformat SE estimates to be within parentheses.
924
+ ideo_tab[se_rows,] <- paste0("(", ideo_tab[se_rows,], ")")
925
+
926
+ # Remove row names for rows with SE estimates.
927
+ rownames(ideo_tab)[se_rows] <- ""
928
+
929
+ # Concatenate data to comport with LaTeX tabular markup.
930
+ ideo_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(ideo_tab))),
931
+ apply(ideo_tab, 1, function (x) {
932
+ paste(x, collapse = " & ")
933
+ }),
934
+ sep = " & "), collapse = " \\\\\n"), "\\\\\n")
935
+
936
+ # Produce tabular output.
937
+ sink("output/ideo_support_tab.tex")
938
+ cat("\\begin{table}\n",
939
+ "\\caption{Support for Joining TPNW by Ideology}\n",
940
+ "\\centering\\small\n",
941
+ "\\sisetup{\n",
942
+ "\tdetect-all,\n",
943
+ "\ttable-number-alignment = center,\n",
944
+ "\ttable-figures-integer = 1,\n",
945
+ "\ttable-figures-decimal = 3,\n",
946
+ "\tinput-symbols = {()}\n",
947
+ "}\n",
948
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
949
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
950
+ "\\toprule\n",
951
+ "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
952
+ "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
953
+ ideo_tab,
954
+ "\\bottomrule\n",
955
+ "\\end{tabular}\n",
956
+ "\\end{table}\n")
957
+ sink()
958
+
959
+ ## Create tabular output for weighted main results.
960
+ # Define matrix object of weighted main results.
961
+ w_tab_dat <- do.call("cbind", w_main_results)
962
+
963
+ # Compute weighted control-group means, with SE estimates; define OLS formula.
964
+ w_ctrl_form <- as.formula(paste(join_tpnw, paste(treats,
965
+ collapse = " + "), sep = " ~ "))
966
+
967
+ # Fit the OLS model per the specification and recover the control mean.
968
+ w_ctrl_fit <- lm(w_ctrl_form, data = tpnw,
969
+ weights = anesrake_weight)
970
+
971
+ # Recover the control-group mean.
972
+ w_ctrl_mean <- w_ctrl_fit$coef["(Intercept)"]
973
+
974
+ # Compute control SE.
975
+ w_ctrl_se <- sqrt(diag(vcovHC(w_ctrl_fit, "HC2")))["(Intercept)"]
976
+
977
+
+ # Concatenate mean and SE output, with placeholder cells for Model 2.
+ w_ctrl_results <- c(format(round(c(w_ctrl_mean, w_ctrl_se), 3) * 100,
+ digits = 2), "|", "|")
+
+ # Rescale estimates to percentage points, formatted to two significant digits.
+ w_tab_dat <- apply(w_tab_dat, 2, function (y) format(round(y, 3) * 100,
+ digits = 2))
+
+ # Bind control-group means with main results data.
+ w_tab <- rbind(w_ctrl_results, w_tab_dat)
+
+ # Rename row containing control-group means.
+ rownames(w_tab)[which(rownames(w_tab) == "1")] <- "control_mean"
+
+ # Identify coefficient columns.
+ coef_cols <- grep("coef$", colnames(w_tab))
+
+ # Identify SE columns.
+ se_cols <- grep("se$", colnames(w_tab))
+
+ # Reformat SE estimates to be within parentheses.
+ w_tab[,se_cols] <- apply(w_tab[, se_cols], 2, function (y) paste0("(", y, ")"))
+
+ # Concatenate data to comport with LaTeX tabular markup.
+ w_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(w_tab))),
+ apply(w_tab, 1, function (x) {
+ paste(x, collapse = " & ")
+ }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
+
+ # Produce tabular output.
+ sink("output/weighted_main_results_tab.tex")
+ cat("\\begin{table}\n",
+ "\\caption{Estimated Treatment Effects on Support for TPNW (Weighted)}\n",
+ "\\begin{adjustbox}{width = \\textwidth, center}\n",
+ "\\sisetup{\n",
+ "\tdetect-all,\n",
+ "\ttable-number-alignment = center,\n",
+ "\ttable-figures-integer = 1,\n",
+ "\ttable-figures-decimal = 3,\n",
+ "\ttable-space-text-post = *,\n",
+ "\tinput-symbols = {()}\n",
+ "}\n",
+ paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
+ "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
+ "\\toprule\n",
+ "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
+ "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
+ w_tab,
+ "\\bottomrule\n",
+ "\\end{tabular}\n",
+ "\\end{adjustbox}\n",
+ "\\end{table}\n")
+ sink()
+
+ ### Save image containing all objects.
+ save.image(file = "output/hbg_replication_out.RData")
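The emitted .tex fragments presuppose a document that loads booktabs (for \toprule), siunitx (for \sisetup and S columns; the table-figures-* keys are siunitx v2-era option names), adjustbox, and an L{<width>} column type, which is not standard LaTeX and must be defined. A hedged preamble sketch that should compile them:

    \documentclass{article}
    \usepackage{booktabs}
    \usepackage{siunitx}  % v2, or a version accepting the deprecated table-figures-* keys
    \usepackage{adjustbox}
    \usepackage{array}
    % Assumption: L{<width>} is a ragged-right fixed-width column.
    \newcolumntype{L}[1]{>{\raggedright\arraybackslash}p{#1}}
    \begin{document}
    \input{output/weighted_main_results_tab.tex}
    \end{document}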
1/replication-package/scripts/hbg_cleaning.R ADDED
@@ -0,0 +1,406 @@
+ ### Initialize workspace.
+ rm(list = ls(all = TRUE))
+ setwd("~/Downloads/hbg_replication")
+
+ # Load required packages.
+ library(plyr)
+ library(car)
+ library(anesrake)
+
+ # Load relevant functions.
+ source("scripts/helper_functions.R")
+
+ ## Load data.
+ # Load TPNW experimental data.
+ tpnw <- read.csv("data/tpnw_raw.csv", stringsAsFactors = FALSE, row.names = 1)
+
+ # Load original income question data.
+ orig_inc <- read.csv("data/tpnw_orig_income.csv", stringsAsFactors = FALSE,
+ row.names = 1)
+
+ # Load YouGov data (including covariates and awareness question).
+ aware <- read.csv("data/tpnw_aware_raw.csv", stringsAsFactors = FALSE,
+ row.names = 1)
+
+ ### Clean TPNW data.
+ ## Clean data.
+ # Remove first two (extraneous) rows.
+ tpnw <- tpnw[-c(1, 2),]
+ orig_inc <- orig_inc[-c(1, 2),]
+
+ # Remove respondents who did not consent.
+ tpnw <- tpnw[tpnw$consent == "1",]
+ orig_inc <- orig_inc[orig_inc$consent == "1",]
+
+ # Coalesce income variables.
+ orig_inc <- within(orig_inc, {
+ income <- as.numeric(income)
+ income <- ifelse(income < 1000, NA, income)
+ income <- ifelse(income < 15000, 1, income)
+ income <- ifelse(income >= 15000 & income < 25000, 2, income)
+ income <- ifelse(income >= 25000 & income < 50000, 3, income)
+ income <- ifelse(income >= 50000 & income < 75000, 4, income)
+ income <- ifelse(income >= 75000 & income < 100000, 5, income)
+ income <- ifelse(income >= 100000 & income < 150000, 6, income)
+ income <- ifelse(income >= 150000 & income < 200000, 7, income)
+ income <- ifelse(income >= 200000 & income < 250000, 8, income)
+ income <- ifelse(income >= 250000 & income < 500000, 9, income)
+ income <- ifelse(income >= 500000 & income < 1000000, 10, income)
+ income <- ifelse(income >= 1000000, 11, income)
+ })
+ orig_inc <- data.frame(pid = orig_inc$pid, income_old = orig_inc$income)
+ tpnw <- plyr::join(tpnw, orig_inc, by = "pid", type = "left")
+ tpnw <- within(tpnw, {
+ income <- coalesce(as.numeric(income), as.numeric(income_old))
+ })
+
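The ifelse() cascade above maps raw dollar amounts to 11 ordinal income bins. For reference, a more compact equivalent using cut() with left-closed breaks (raw_income is a hypothetical input vector, not part of the pipeline; values below 1000 fall outside the breaks and become NA, matching the cascade):

    raw_income <- c(500, 12000, 60000, 2e6)
    as.integer(cut(raw_income,
                   breaks = c(1000, 15000, 25000, 50000, 75000, 1e5, 1.5e5,
                              2e5, 2.5e5, 5e5, 1e6, Inf),
                   right = FALSE))
    ## [1] NA  1  4 11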
+ # Note meta variables.
+ meta <- c("consent", "confirmation_code", "new_income_q")
+
+ # Note Qualtrics variables.
+ qualtrics_vars <- c("StartDate", "EndDate", "Status", "Progress",
+ "Duration..in.seconds.", "Finished", "RecordedDate",
+ "DistributionChannel", "UserLanguage")
+
+ # Note Dynata variables.
+ dynata_vars <- c("pid", "psid")
+
+ # Note non-numeric variables.
+ char_vars <- c(qualtrics_vars, dynata_vars,
+ c("ResponseId"), names(tpnw)[grep("text", tolower(names(tpnw)))])
+ char_cols <- which(names(tpnw) %in% char_vars)
+
+ # Numericize other variables.
+ tpnw <- data.frame(apply(tpnw[, -char_cols], 2, as.numeric), tpnw[char_cols])
+
+ # Prefix attitudinal outcome variables with "tpnw_atts".
+ tpnw_atts <- which(names(tpnw) %in% c("danger", "peace", "safe", "use_unaccept",
+ "always_cheat", "cannot_elim", "slow_reduc"))
+ names(tpnw)[tpnw_atts] <- paste("tpnw_atts", names(tpnw)[tpnw_atts], sep = "_")
+
+ # Recode relevant variables.
+ tpnw <- within(tpnw, {
+ # Clean gender variable.
+ female <- ifelse(gender == 95, NA, gender)
+
+ # Transform birthyr variable to age.
+ age <- 2019 - birthyr
+
+ # Transform income variable.
+ income <- car::recode(income, "95 = NA")
+
+ # Combine pid and pid_forc variables.
+ pid3 <- ifelse(pid3 == 0, pid_forc, pid3)
+
+ # Recode ideology variable.
+ ideo <- car::recode(ideo, "3 = NA")
+
+ # Recode education variable.
+ educ <- car::recode(educ, "95 = NA")
+
+ # Recode state variable.
+ state <- recode(state, "1 = 'Alabama';
+ 2 = 'Alaska';
+ 4 = 'Arizona';
+ 5 = 'Arkansas';
+ 6 = 'California';
+ 8 = 'Colorado';
+ 9 = 'Connecticut';
+ 10 = 'Delaware';
+ 11 = 'Washington DC';
+ 12 = 'Florida';
+ 13 = 'Georgia';
+ 15 = 'Hawaii';
+ 16 = 'Idaho';
+ 17 = 'Illinois';
+ 18 = 'Indiana';
+ 19 = 'Iowa';
+ 20 = 'Kansas';
+ 21 = 'Kentucky';
+ 22 = 'Louisiana';
+ 23 = 'Maine';
+ 24 = 'Maryland';
+ 25 = 'Massachusetts';
+ 26 = 'Michigan';
+ 27 = 'Minnesota';
+ 28 = 'Mississippi';
+ 29 = 'Missouri';
+ 30 = 'Montana';
+ 31 = 'Nebraska';
+ 32 = 'Nevada';
+ 33 = 'New Hampshire';
+ 34 = 'New Jersey';
+ 35 = 'New Mexico';
+ 36 = 'New York';
+ 37 = 'North Carolina';
+ 38 = 'North Dakota';
+ 39 = 'Ohio';
+ 40 = 'Oklahoma';
+ 41 = 'Oregon';
+ 42 = 'Pennsylvania';
+ 44 = 'Rhode Island';
+ 45 = 'South Carolina';
+ 46 = 'South Dakota';
+ 47 = 'Tennessee';
+ 48 = 'Texas';
+ 49 = 'Utah';
+ 50 = 'Vermont';
+ 51 = 'Virginia';
+ 53 = 'Washington';
+ 54 = 'West Virginia';
+ 55 = 'Wisconsin';
+ 56 = 'Wyoming'")
+
+ # Create regional indicators.
+ northeast <- state %in% c("Connecticut", "Maine", "Massachusetts",
+ "New Hampshire", "Rhode Island", "Vermont",
+ "New Jersey", "New York", "Pennsylvania")
+ midwest <- state %in% c("Illinois", "Indiana", "Michigan", "Ohio",
+ "Wisconsin", "Iowa", "Kansas", "Minnesota",
+ "Missouri", "Nebraska", "North Dakota",
+ "South Dakota")
+ south <- state %in% c("Delaware", "Florida", "Georgia", "Maryland",
+ "North Carolina", "South Carolina", "Virginia",
+ "Washington DC", "West Virginia", "Alabama",
+ "Kentucky", "Mississippi", "Tennessee", "Arkansas",
+ "Louisiana", "Oklahoma", "Texas")
+ west <- state %in% c("Arizona", "Colorado", "Idaho", "Montana", "Nevada",
+ "New Mexico", "Utah", "Wyoming", "Alaska",
+ "California", "Hawaii", "Oregon", "Washington")
+
+ # Recode join_tpnw outcome.
+ join_tpnw <- car::recode(join_tpnw, "2 = 0")
+
+ # Create indicator variables for each treatment arm.
+ control <- treatment == 0
+ group_cue <- treatment == 1
+ security_cue <- treatment == 2
+ norms_cue <- treatment == 3
+ institutions_cue <- treatment == 4
+
+ # Recode attitudinal outcomes.
+ tpnw_atts_danger <- recode(tpnw_atts_danger, "-2 = 2; -1 = 1; 1 = -1; 2 = -2")
+ tpnw_atts_use_unaccept <- recode(tpnw_atts_use_unaccept, "-2 = 2; -1 = 1;
+ 1 = -1; 2 = -2")
+ tpnw_atts_always_cheat <- recode(tpnw_atts_always_cheat, "-2 = 2; -1 = 1;
+ 1 = -1; 2 = -2")
+ tpnw_atts_cannot_elim <- recode(tpnw_atts_cannot_elim, "-2 = 2; -1 = 1;
+ 1 = -1; 2 = -2")
+ })
+
+ # Use mean imputation for missingness.
+ # Redefine char_cols object.
+ char_cols <- which(names(tpnw) %in% c(char_vars, meta, "state", "pid_forc",
+ "income_old", "gender"))
+
+ # Define out_vars object.
+ out_vars <- which(names(tpnw) %in% c("join_tpnw", "n_nukes", "n_tests") |
+ startsWith(names(tpnw), "tpnw_atts") |
+ startsWith(names(tpnw), "physical_eff") |
+ startsWith(names(tpnw), "testing_matrix"))
+
+ # Mean impute.
+ tpnw[,-c(char_cols, out_vars)] <-
+ data.frame(apply(tpnw[, -c(char_cols, out_vars)], 2, function (x) {
+ replace(x, is.na(x), mean(x, na.rm = TRUE))
+ }))
+
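The apply()/replace() idiom above substitutes each column's observed mean for its missing values. Its effect on a single toy column:

    x <- c(1, NA, 3)
    replace(x, is.na(x), mean(x, na.rm = TRUE))
    ## [1] 1 2 3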
+ ### Clean YouGov data.
+ ## Indicate all non-numeric variables.
+ # Indicate YouGov metadata variables (e.g., start/end time, respondent ID) that
+ # may contain characters.
+ yougov_vars <- c("starttime", "endtime")
+
+ # Numericize all numeric variables.
+ aware <- data.frame(apply(aware[, -which(names(aware) %in% yougov_vars)], 2,
+ as.numeric), aware[which(names(aware) %in% yougov_vars)])
+
+ # Recode relevant variables.
+ aware <- within(aware, {
+ # Clean gender variable to an indicator of female gender (renamed below).
+ gender <- recode(gender, "8 = NA") - 1
+
+ # Transform birthyr variable to age (renamed below).
+ birthyr <- 2020 - birthyr
+
+ # Recode pid3 variable.
+ pid3 <- recode(pid3, "1 = -1; 2 = 1; 3 = 0; c(5, 8, 9) = NA")
+
+ # Recode pid7 variable.
+ pid7 <- recode(pid7, "1 = -3; 2 = -2; 3 = -1; 4 = 0; 5 = 1; 6 = 2; 7 = 3;
+ c(8, 98) = NA")
+
+ # Code pid variable from pid7.
+ party <- recode(pid7, "c(-3, -2, -1) = -1; c(1, 2, 3) = 1")
+
+ # Recode ideology variable.
+ ideo5 <- recode(ideo5, "c(6, 8, 9) = NA") - 3
+
+ # Recode education variable.
+ educ <- recode(educ, "c(8, 9) = NA")
+
+ # Recode state variable.
+ state <- recode(inputstate, "1 = 'Alabama';
+ 2 = 'Alaska';
+ 4 = 'Arizona';
+ 5 = 'Arkansas';
+ 6 = 'California';
+ 8 = 'Colorado';
+ 9 = 'Connecticut';
+ 10 = 'Delaware';
+ 11 = 'Washington DC';
+ 12 = 'Florida';
+ 13 = 'Georgia';
+ 15 = 'Hawaii';
+ 16 = 'Idaho';
+ 17 = 'Illinois';
+ 18 = 'Indiana';
+ 19 = 'Iowa';
+ 20 = 'Kansas';
+ 21 = 'Kentucky';
+ 22 = 'Louisiana';
+ 23 = 'Maine';
+ 24 = 'Maryland';
+ 25 = 'Massachusetts';
+ 26 = 'Michigan';
+ 27 = 'Minnesota';
+ 28 = 'Mississippi';
+ 29 = 'Missouri';
+ 30 = 'Montana';
+ 31 = 'Nebraska';
+ 32 = 'Nevada';
+ 33 = 'New Hampshire';
+ 34 = 'New Jersey';
+ 35 = 'New Mexico';
+ 36 = 'New York';
+ 37 = 'North Carolina';
+ 38 = 'North Dakota';
+ 39 = 'Ohio';
+ 40 = 'Oklahoma';
+ 41 = 'Oregon';
+ 42 = 'Pennsylvania';
+ 44 = 'Rhode Island';
+ 45 = 'South Carolina';
+ 46 = 'South Dakota';
+ 47 = 'Tennessee';
+ 48 = 'Texas';
+ 49 = 'Utah';
+ 50 = 'Vermont';
+ 51 = 'Virginia';
+ 53 = 'Washington';
+ 54 = 'West Virginia';
+ 55 = 'Wisconsin';
+ 56 = 'Wyoming'")
+
+ # Define US Census geographic regions.
+ northeast <- inputstate %in% c(9, 23, 25, 33, 44, 50, 34, 36, 42)
+ midwest <- inputstate %in% c(18, 17, 26, 39, 55, 19, 20, 27, 29, 31, 38, 46)
+ south <- inputstate %in% c(10, 11, 12, 13, 24, 37, 45, 51,
+ 54, 1, 21, 28, 47, 5, 22, 40, 48)
+ west <- inputstate %in% c(4, 8, 16, 35, 30, 49, 32, 56, 2, 6, 15, 41, 53)
+
+ # Recode employment.
+ employ <- recode(employ, "c(9, 98, 99) = NA")
+
+ # Recode outcome.
+ awareness <- recode(awareness, "8 = NA")
+
+ # Normalize weights.
+ weight <- weight / sum(weight)
+ })
+
+ # Rename demographic questions.
+ aware <- rename(aware, c("gender" = "female", "birthyr" = "age",
+ "faminc_new" = "income", "ideo5" = "ideo"))
+
+ ## Impute missing values.
+ # Specify non-covariate numerical variables (other is exempted since over 10% of
+ # responses are missing; state is exempted since the variable is categorical).
+ non_covars <- names(aware)[names(aware) %in% c("caseid", "starttime", "endtime",
+ "awareness", "state", "weight")]
+
+ # Use mean imputation for missingness in covariates.
+ aware[, -which(names(aware) %in% non_covars)] <-
+ data.frame(apply(aware[, -which(names(aware) %in%
+ non_covars)], 2, function (x) {
+ replace(x, is.na(x), mean(x, na.rm = TRUE))
+ }))
+
+ ### Produce weights for TPNW experimental data using anesrake.
+ ## Create unique identifier variable for assigning weights.
+ tpnw$caseid <- 1:nrow(tpnw)
+
+ ## Recode relevant covariates for reweighting: coarsen age; recode female; and
+ ## recode geographic covariates.
+ # Coarsen age into a categorical variable for age groups.
+ tpnw$age_wtng <- cut(tpnw$age, c(0, 25, 35, 45, 55, 65, 99))
+ levels(tpnw$age_wtng) <- c("age1824", "age2534", "age3544",
+ "age4554", "age5564", "age6599")
+
+ # Recode female as a factor to account for NA values.
+ tpnw$female_wtng <- as.factor(tpnw$female)
+ levels(tpnw$female_wtng) <- c("male", "na", "female")
+
+ # Recode northeast as a factor.
+ tpnw$northeast_wtng <- as.factor(tpnw$northeast)
+ levels(tpnw$northeast_wtng) <- c("other", "northeast")
+
+ # Recode midwest as a factor.
+ tpnw$midwest_wtng <- as.factor(tpnw$midwest)
+ levels(tpnw$midwest_wtng) <- c("other", "midwest")
+
+ # Recode south as a factor.
+ tpnw$south_wtng <- as.factor(tpnw$south)
+ levels(tpnw$south_wtng) <- c("other", "south")
+
+ # Recode west as a factor.
+ tpnw$west_wtng <- as.factor(tpnw$west)
+ levels(tpnw$west_wtng) <- c("other", "west")
+
+ ## Specify population targets for balancing (from US Census 2018 data).
+ # Specify gender proportion targets and assign names to comport with factors.
+ femaletarg <- c(.508, 0, .492)
+ names(femaletarg) <- c("female", "na", "male")
+
+ # Specify age-group proportion targets and assign names to comport with factors.
+ agetarg <- c(29363, 44854, 40659, 41537, 41700, 51080)/249193
+ names(agetarg) <- c("age1824", "age2534", "age3544",
+ "age4554", "age5564", "age6599")
+
+ # Specify northeast proportion targets and assign names to comport with factors.
+ northeasttarg <- c(1 - .173, .173)
+ names(northeasttarg) <- c("other", "northeast")
+
+ # Specify midwest proportion targets and assign names to comport with factors.
+ midwesttarg <- c(1 - .209, .209)
+ names(midwesttarg) <- c("other", "midwest")
+
+ # Specify south proportion targets and assign names to comport with factors.
+ southtarg <- c(1 - .380, .380)
+ names(southtarg) <- c("other", "south")
+
+ # Specify west proportion targets and assign names to comport with factors.
+ westtarg <- c(1 - .238, .238)
+ names(westtarg) <- c("other", "west")
+
+ # Create a list of all targets, with names to comport with relevant variables.
+ targets <- list(femaletarg, agetarg, northeasttarg,
+ midwesttarg, southtarg, westtarg)
+ names(targets) <- c("female_wtng", "age_wtng", "northeast_wtng",
+ "midwest_wtng", "south_wtng", "west_wtng")
+
+ # Produce anesrake weights.
+ anesrake_out <- anesrake(targets, tpnw, caseid = tpnw$caseid,
+ verbose = TRUE)
+
+ # Append anesrake weights to TPNW experimental data.
+ tpnw$anesrake_weight <- anesrake_out$weightvec
+
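A hedged diagnostic worth running interactively (not part of the original pipeline): the anesrake package provides a summary method for the raking output, reporting target convergence and the design effect, and the weight range flags extreme cases.

    summary(anesrake_out)          # convergence and design-effect diagnostics
    range(anesrake_out$weightvec)  # check for extreme weights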
+ # Remove variables used for weighting.
+ tpnw <- tpnw[-grep("wtng$", names(tpnw))]
+
+ ## Write data.
+ # Write full experimental dataset.
+ write.csv(tpnw, "data/tpnw_data.csv")
+
+ # Write full YouGov dataset.
+ write.csv(aware, "data/tpnw_aware.csv")
1/replication-package/scripts/hbg_group_cue.R ADDED
@@ -0,0 +1,53 @@
+ ## Initialize workspace.
+ # Remove objects.
+ rm(list = ls(all = TRUE))
+
+ ## Generate data.
+ # Create count object storing count data.
+ count <- as.matrix(c(1547, 54, 2346))
+
+ # Convert count object to an object storing percentages.
+ perc <- sapply(count, function (x) x/sum(count))
+
+ # Create a cumulative percentage object.
+ cum_perc <- cumsum(perc)
+
+ # Create separate objects for the plotting of each proportion.
+ power_x <- c(0, rep(.74, 2), 0)
+ both_x <- c(.74, rep(.96, 2), .74)
+ weap_x <- c(.96, rep(1, 2), .96)
+
+ # Create an object representing the y-axis plotting points for each polygon.
+ plot_y <- c(2.25, 2.25, 3, 3)
+
+ # Open a new .eps graphics device.
+ setEPS()
+ postscript("fgc1.eps", width = 10, height = 3)
+
+ # Modify graphical parameters (margins).
+ par(mar = c(0, 6, 6, 1))
+
+ # Create an empty plot.
+ plot(1, type = "n", xlab = "", ylab = "", xlim = c(0, 1), ylim = c(1.5, 3), axes = FALSE)
+
+ # Create polygons representing each proportion.
+ polygon(power_x, plot_y, col = "#FF8F37", border = "white")
+ polygon(both_x, plot_y, col = "steelblue3", border = "white")
+ polygon(weap_x, plot_y, col = "gray", border = "white")
+
+ # Create an axis with tick marks and axis labels.
+ axis(side = 3, at = seq(0, 1, .1), labels = FALSE)
+ text(x = seq(0, 1, .2), y = par("usr")[4] + .2, labels = c("0%", "20%", "40%", "60%", "80%", "100%"), xpd = TRUE)
+ mtext(text = "Proportion of Responses", side = 3, line = 2.5, cex = 1.25, font = 2)
+
+ # Add text denoting the percentage associated with each proportion.
+ text(x = .74/2, y = 2.2, pos = 1, cex = 2, labels = "74%", col = "#FF8F37", font = 2)
+ text(x = .85, y = 2.2, pos = 1, cex = 2, labels = "22%", col = "steelblue3", font = 2)
+ text(x = .98, y = 2.2, labels = "4%", pos = 1, cex = 2, col = "grey", font = 2, xpd = TRUE)
+
+ # Add a legend.
+ leg <- legend(x = -.16, y = 2.625, legend = c("Oppose", "Support", "Prefer not\nto answer"), xpd = TRUE,
+ pch = 16, col = c("#FF8F37", "steelblue3", "grey"), box.lty = 0, cex = .9, y.intersp = 1.5, yjust = .5)
+
+ # Close the device.
+ dev.off()
1/replication-package/scripts/helper_functions.R ADDED
@@ -0,0 +1,16 @@
+ # Define coalesce() function: return the elementwise first non-NA value across its vector arguments.
+ coalesce <- function (...) {
+ Reduce(function(x, y) {
+ i <- which(is.na(x))
+ x[i] <- y[i]
+ x},
+ list(...))
+ }
+
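Usage sketch: coalesce() fills NA positions in its first argument with values from later arguments, elementwise.

    coalesce(c(1, NA, 3), c(9, 2, 9))
    ## [1] 1 2 3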
+ # Define capwords() function from the toupper() documentation.
+ capwords <- function(s, strict = FALSE) {
+ cap <- function(s) paste(toupper(substring(s, 1, 1)),
+ {s <- substring(s, 2); if(strict) tolower(s) else s},
+ sep = "", collapse = " " )
+ sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
+ }
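Usage sketch, mirroring how the analysis script builds table labels from row names:

    capwords(gsub("_", " ", "control_mean"))
    ## [1] "Control Mean"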
1/replication-package/scripts/run_hbg_replication.R ADDED
@@ -0,0 +1,36 @@
+ ## Initialize workspace.
+ # Clear workspace.
+ rm(list = ls(all = TRUE))
+
+ # Set working directory to the hbg_replication directory.
+ setwd("~/Downloads/hbg_replication")
+
+ ## Prepare output directory and main output files.
+ # If an output directory does not exist, create the directory.
+ if (!file.exists("output")) {
+ dir.create("output")
+ }
+
+ # Create a log file for console output.
+ hbg_log <- file("output/hbg_log.txt", open = "wt")
+
+ # Echo and sink console output to the hbg_log file.
+ sink(hbg_log, append = TRUE)
+ sink(hbg_log, append = TRUE, type = "message")
+
+ ## Run the replication scripts and produce main output.
+ # Run the hbg_cleaning.R and hbg_analysis.R scripts, storing run-time statistics.
+ run_time <- system.time({source("scripts/hbg_cleaning.R", echo = TRUE,
+ max.deparse.length = 10000)
+ source("scripts/hbg_analysis.R", echo = TRUE,
+ max.deparse.length = 10000)})
+
+ # Close main output sink.
+ sink()
+ sink(type = "message")
+
+ ## Sink run-time statistics to a run_time output file.
+ run_time_file <- file("output/run_time", open = "wt")
+ sink(run_time_file, append = TRUE)
+ print(run_time)
+ sink()
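Usage sketch: from a shell, "Rscript scripts/run_hbg_replication.R" runs the cleaning and analysis scripts end to end, writing the console log and run-time statistics to output/ (assuming the ~/Downloads/hbg_replication layout set above).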
1/should_reproduce.txt ADDED
@@ -0,0 +1,2 @@
+ Figure 1
+ Table 1