anonymous-submission-acl2025 committed
Commit a54b2f4 · Parent(s): 1d8f13a
Files changed (50)
  1. 17/paper.pdf +3 -0
  2. 17/replication_package/code/LICENSE-code.md +11 -0
  3. 17/replication_package/code/LICENSE-data.md +297 -0
  4. 17/replication_package/code/README.md +303 -0
  5. 17/replication_package/code/README.pdf +3 -0
  6. 17/replication_package/code/analysis/descriptive/README.md +21 -0
  7. 17/replication_package/code/analysis/descriptive/code/COVIDResponse.do +168 -0
  8. 17/replication_package/code/analysis/descriptive/code/CommitmentDemand.do +457 -0
  9. 17/replication_package/code/analysis/descriptive/code/DataDescriptives.do +668 -0
  10. 17/replication_package/code/analysis/descriptive/code/HeatmapPlots.R +144 -0
  11. 17/replication_package/code/analysis/descriptive/code/QualitativeEvidence.do +152 -0
  12. 17/replication_package/code/analysis/descriptive/code/SampleStatistics.do +138 -0
  13. 17/replication_package/code/analysis/descriptive/code/Scalars.do +625 -0
  14. 17/replication_package/code/analysis/descriptive/code/Temptation.do +100 -0
  15. 17/replication_package/code/analysis/descriptive/input.txt +3 -0
  16. 17/replication_package/code/analysis/descriptive/make.py +75 -0
  17. 17/replication_package/code/analysis/structural/.RData +3 -0
  18. 17/replication_package/code/analysis/structural/.Rhistory +512 -0
  19. 17/replication_package/code/analysis/structural/README.md +6 -0
  20. 17/replication_package/code/analysis/structural/code/StructuralModel.R +295 -0
  21. 17/replication_package/code/analysis/structural/input.txt +3 -0
  22. 17/replication_package/code/analysis/structural/make.py +67 -0
  23. 17/replication_package/code/analysis/treatment_effects/README.md +22 -0
  24. 17/replication_package/code/analysis/treatment_effects/code/Beliefs.do +359 -0
  25. 17/replication_package/code/analysis/treatment_effects/code/CommitmentResponse.do +1404 -0
  26. 17/replication_package/code/analysis/treatment_effects/code/FDRTable.do +252 -0
  27. 17/replication_package/code/analysis/treatment_effects/code/HabitFormation.do +121 -0
  28. 17/replication_package/code/analysis/treatment_effects/code/Heterogeneity.do +963 -0
  29. 17/replication_package/code/analysis/treatment_effects/code/HeterogeneityInstrumental.do +477 -0
  30. 17/replication_package/code/analysis/treatment_effects/code/ModelHeterogeneity.R +1406 -0
  31. 17/replication_package/code/analysis/treatment_effects/code/SurveyValidation.do +136 -0
  32. 17/replication_package/code/analysis/treatment_effects/input.txt +3 -0
  33. 17/replication_package/code/analysis/treatment_effects/make.py +75 -0
  34. 17/replication_package/code/codebook.xlsx +3 -0
  35. 17/replication_package/code/config.yaml +122 -0
  36. 17/replication_package/code/config_user.yaml +67 -0
  37. 17/replication_package/code/data/README.md +96 -0
  38. 17/replication_package/code/data/__init__.py +0 -0
  39. 17/replication_package/code/data/external.txt +3 -0
  40. 17/replication_package/code/data/input.txt +3 -0
  41. 17/replication_package/code/data/make.py +68 -0
  42. 17/replication_package/code/data/source/__init__.py +0 -0
  43. 17/replication_package/code/data/source/build_master/__init__.py +0 -0
  44. 17/replication_package/code/data/source/build_master/builder.py +328 -0
  45. 17/replication_package/code/data/source/build_master/cleaners/clean_events_alt.py +150 -0
  46. 17/replication_package/code/data/source/build_master/cleaners/clean_events_budget.py +58 -0
  47. 17/replication_package/code/data/source/build_master/cleaners/clean_events_pc.py +60 -0
  48. 17/replication_package/code/data/source/build_master/cleaners/clean_events_snooze.py +43 -0
  49. 17/replication_package/code/data/source/build_master/cleaners/clean_events_snooze_delays.py +16 -0
  50. 17/replication_package/code/data/source/build_master/cleaners/clean_events_status.py +59 -0
17/paper.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8b0fc065482b1eb4516d89a5be07f984a421548fd1c966979f01941928ee03a
+ size 1797770
17/replication_package/code/LICENSE-code.md ADDED
@@ -0,0 +1,11 @@
+ MIT License
+
+ Copyright (c) 2021 Hunt Allcott, Matthew Gentzkow, and Lena Song
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17/replication_package/code/LICENSE-data.md ADDED
@@ -0,0 +1,297 @@
+
+ [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/)
+ - applies to databases, images, tables, text, and any other objects
+
+ #### Creative Commons Attribution 4.0 International Public License
+ By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
+
+ Section 1 – Definitions.
+
+ (a) Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
+
+ (b) Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
+
+ (c) Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
+
+ (d) Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
+
+ (e) Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
+
+ (f) Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
+
+ (g) Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
+
+ (h) Licensor means the individual(s) or entity(ies) granting rights under this Public License.
+
+ (i) Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
+
+ (j) Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
+
+ (k) You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
+
+ Section 2 – Scope.
+
+ (a) License grant.
+
+ 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
+
+ A. reproduce and Share the Licensed Material, in whole or in part; and
+
+ B. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section 6(a).
+
+ 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
+
+ B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
+
+ (b) Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this Public License.
+
+ 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
+
+ Section 3 – License Conditions.
+
+ Your exercise of the Licensed Rights is expressly made subject to the following conditions.
+
+ (a) Attribution.
+
+ 1. If You Share the Licensed Material (including in modified form), You must:
+
+ A. retain the following if it is supplied by the Licensor with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
+
+ B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
+
+ C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
+
+ 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
+
+ Section 4 – Sui Generis Database Rights.
+
+ Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
+
+ (a) for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
+
+ (b) if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
+
+ (c) You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
+
+ For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
+
+ Section 5 – Disclaimer of Warranties and Limitation of Liability.
+
+ (a) Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
+
+ (b) To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
+
+ (c) The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
+
+ Section 6 – Term and Termination.
+
+ (a) This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
+
+ (b) Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
+
+ (c) For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
+
+ (d) Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
+
+ Section 7 – Other Terms and Conditions.
+
+ (a) The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
+
+ (b) Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
+
+ Section 8 – Interpretation.
+
+ (a) For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
+
+ (b) To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
+
+ (c) No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
+
+ (d) Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
17/replication_package/code/README.md ADDED
@@ -0,0 +1,303 @@
+ # README
+
+ ## Overview
+ The code in this replication package constructs the analysis tables, figures, and scalars found in our paper using Stata and R.
+ The results presented in our paper are obtained in three steps.
+ In the first step, the original raw data from our server are processed.
+ In the second step, the raw data are stripped of any PII elements and all anonymized datasets are merged together to create a dataset named `final_data_sample.dta`.
+ All of the analysis presented in the paper is based on this anonymized data.
+ In the third step, all descriptive tables and figures as well as all regression outputs are produced.
+
+ In this replication archive, we provide the code necessary to carry out all three steps. We provide the anonymized dataset `final_data_sample.dta` in a separate archive (Allcott, Gentzkow, and Song, 2023): https://doi.org/10.7910/DVN/GN636M.
+
+ Once you have access to that separate archive, download `final_data_sample.dta` and manually add it to this archive under the folder `data/temptation/output`. This will allow you to run the `/analysis/` module (third step), which constructs the tables, figures, and scalars found in the paper. The `/data/` module relies on confidential data that is not provided, and therefore will not run properly.
+
+ This replication archive contains additional files to help replication in the `/docs/` folder.
+ The first, `DescriptionOfSteps.pdf`, describes which modules are included in each step and how the steps relate to each other. The second, `Step1_Step2_DAG.pdf`, illustrates how steps 1 and 2 are carried out via a directed acyclic graph.
+ The third, `MappingsTablesAndFigures.pdf`, provides a mapping of all the tables and figures to their corresponding programs.
+
+ The replication routine can be run by following the instructions in the **Instructions to replicators** section of this README.
+ The authors will provide replication support if necessary. The replicator should expect the code to run for about 40 minutes.
+
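+ As a convenience, the following minimal sketch (ours, not part of the original package) checks that the dataset has been placed where the analysis code expects it; the path follows the instructions above.
+
+ ```
+ # Hypothetical pre-flight check: verify final_data_sample.dta is in place.
+ from pathlib import Path
+
+ expected = Path("data/temptation/output/final_data_sample.dta")
+ if expected.is_file():
+     print(f"Found {expected} ({expected.stat().st_size / 1e6:.1f} MB); /analysis/ can be run.")
+ else:
+     raise FileNotFoundError(
+         f"{expected} is missing: download it from https://doi.org/10.7910/DVN/GN636M "
+         "and copy it into data/temptation/output/ before running the analysis."
+     )
+ ```
+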
+ ## Data Availability and Provenance Statements
+ This archive includes data that were collected from an Android application and from surveys, as detailed in Section 3 of the paper.
+ The folder `experiment_design` contains the questionnaires of all 5 surveys (the recruitment survey and the 4 subsequent surveys administered to our sample). It also contains a subfolder `AppScreenshots` with various screenshots of our application, Phone Dashboard.
+ In the separate archive, we provide the anonymized dataset `final_data_sample.dta`, which gathers aggregated usage data from the application and survey data. Each individual in our final sample is assigned a user ID. Variables that correspond to information coming from the application start with `PD_P1`, `PD_P2`, `PD_P3`, `PD_P4`, or `PD_P5` (depending on the period 1-5 in which they were collected). Variables that correspond to information coming from surveys start with `S1_`, `S2_`, `S3_`, or `S4_` (depending on the survey 1-4 in which they were collected).
+ The `codebook.xlsx` file at the root of the repository is the codebook for `final_data_sample.dta`. It lists all the variables found in the dataset along with their labels, units, and values (if applicable).
+
+ ### Statement about Rights
+ We certify that the author(s) of the manuscript have legitimate access to and permission to use the data used in this manuscript.
+
+ ### License for Data
+
+ All databases, images, tables, text, and any other objects are available under a Creative Commons Attribution 4.0 International Public License. Please refer to the document `LICENSE-data.md` at the root of the repository.
+
+ ### Summary of Availability
+
+ Some data **cannot be made** publicly available.
+
+ ### Details on each Data Source
+
+ The raw data for this project are confidential and were collected by the authors. The authors will assist with any reasonable replication attempts and can be contacted by email. This paper uses data obtained from the Android application Phone Dashboard and from surveys.
+
+ The dataset `final_data_sample.dta`, provided in the separate archive, combines the data from both our surveys and our Phone Dashboard application. It is derived after processing all the raw confidential data from the Phone Dashboard application. This dataset aggregates usage data at the user level and combines it with variables obtained from our surveys. All variables in this dataset have corresponding value labels. One can also refer to the provided codebook, `codebook.xlsx`, at the root of the repository, for more information on each variable.
+
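+ Because of this prefix convention, the dataset can be sliced by data source programmatically. A minimal sketch (ours, assuming `final_data_sample.dta` has been placed as described above and that `pandas` from the requirements list is installed):
+
+ ```
+ import pandas as pd
+
+ # Load the anonymized user-level dataset (Stata format).
+ df = pd.read_stata("data/temptation/output/final_data_sample.dta")
+
+ # Group columns by origin using the documented prefixes.
+ app_cols = [c for c in df.columns if c.startswith(("PD_P1", "PD_P2", "PD_P3", "PD_P4", "PD_P5"))]
+ survey_cols = [c for c in df.columns if c.startswith(("S1_", "S2_", "S3_", "S4_"))]
+ print(f"{len(app_cols)} Phone Dashboard variables, {len(survey_cols)} survey variables")
+ ```
+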
+ ## Dataset list
+ - As detailed in the graph `docs/Step1_Step2_DAG.pdf`, our pipeline processes the raw data from our Phone Dashboard application as well as from our surveys. The code for this data processing is provided in the `/data/` folder. Multiple intermediate files are generated through this pipeline. These files are not provided as part of this replication archive for confidentiality reasons.
+
+ - The file `final_data_sample.dta`, provided in the separate archive, is obtained at the end of the data-processing pipeline. It combines data from the application and the surveys and serves as input for the analysis figures and tables. It is not included in this replication archive itself and must be added manually as described above.
+
+ ## Computational requirements
+ All requirements must be installed and set up for command line usage. For further detail, see the **Command Line Usage** section below.
+
+ We manage Python and R installations using conda or miniconda.
+ To build the repository as-is, the following applications are additionally required:
+ * LyX 2.3.5.2
+ * R 3.6.3
+ * Stata 16.1
+ * Python 3.7
+
+ These applications are used by the scripts contained in the repository's `setup` folder. Instructions to set up the environment are found below in the **Instructions to replicators** section.
+
+ ### Software requirements
+ The file `setup/conda_env.yaml` will install all the R and Python dependencies. Please refer to the **Instructions to replicators** section for detailed steps on how to install the required environment and run the scripts.
+ Below we list the software and packages required to run the repository, with the versions used; a quick version-check sketch follows the lists.
+
+ - Python 3.7
+   - `pyyaml` (5.3.1)
+   - `numpy` (1.16.2)
+   - `pandas` (0.25.0)
+   - `matplotlib` (3.0.3)
+   - `gitpython` (2.1.15)
+   - `termcolor` (1.1.0)
+   - `colorama` (0.4.3)
+   - `jupyter` (4.6.3)
+   - `future` (0.17.1)
+   - `linearmodels` (4.17)
+   - `patsy` (0.5.1)
+   - `stochatreat` (0.0.8)
+   - `pympler` (0.9)
+   - `memory_profiler`
+   - `dask` (1.2.1)
+   - `openpyxl` (2.6.4)
+   - `requests` (2.24.0)
+   - `pip` (19)
+
+ - R 3.6
+   - `yaml` (2.2.1)
+   - `haven` (2.3.1)
+   - `tidyverse` (1.3.1)
+   - `r.utils` (4.0.3)
+   - `plm` (2.6.1)
+   - `janitor` (2.1.0)
+   - `rio` (0.5.26)
+   - `lubridate` (1.7.10)
+   - `magrittr` (2.0.1)
+   - `stargazer` (5.2.2)
+   - `rootSolve` (1.8.2.1)
+   - `rlist` (0.4.6.1)
+   - `ebal` (0.1.6)
+   - `latex2exp` (0.5.0)
+   - `estimatr` (0.30.2)
+
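+ As a quick sanity check (ours, not part of the package), installed Python package versions can be compared against the pinned ones; this sketch covers a subset and uses `pkg_resources`, which ships with `pip`/`setuptools` on Python 3.7:
+
+ ```
+ # Hypothetical version check against a few of the pinned requirements above.
+ import pkg_resources
+
+ pinned = {"pyyaml": "5.3.1", "numpy": "1.16.2", "pandas": "0.25.0", "linearmodels": "4.17"}
+ for pkg, want in pinned.items():
+     try:
+         have = pkg_resources.get_distribution(pkg).version
+         status = "OK" if have == want else f"expected {want}"
+         print(f"{pkg}: {have} ({status})")
+     except pkg_resources.DistributionNotFound:
+         print(f"{pkg}: not installed")
+ ```
+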
+ ### Controlled Randomness
+ We control randomness by setting random seeds; a sketch of the pattern follows this list.
+ 1. For the data-processing: The program `/data/source/clean_master/cleaner.py` has its own random seed, set on line 24. The program `data/source/build_master/builder.py` calls the `/lib/` file `lib/data_helpers/clean_survey.py`, which sets a random seed on line 21. The program `lib/experiment_specs/study_config.py` contains parameters used by `data/source/clean_master/management/earnings.py` and `data/source/clean_master/management/midline_prep.py`, which include a random seed set on line 459.
+ 2. For the analysis: The program `lib/ModelFunctions.R` contains parameters used by `structural/code/StructuralModel.R` and `treatment_effects/code/ModelHeterogeneity.R`, which include a random seed set on line 48.
+
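+ The sketch below illustrates the seeding pattern described above; the constant name and value are hypothetical, not the package's actual ones.
+
+ ```
+ import random
+
+ import numpy as np
+
+ SEED = 12345  # hypothetical constant; the real seeds live in the modules listed above
+
+ def set_seeds(seed: int = SEED) -> None:
+     """Seed every RNG a script may touch so reruns are reproducible."""
+     random.seed(seed)
+     np.random.seed(seed)
+
+ set_seeds()
+ ```
+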
+ ### Memory and Runtime Requirements
+ The folder `/data/` is responsible for all the data processing using the raw Phone Dashboard data as well as the survey data. At the end of this data processing, the file `final_data_sample.dta` is created. In the presence of the raw confidential data (which is not provided with this replication archive), this whole process normally takes around 60 hours on 20 CPUs with 12GB of memory per CPU.
+
+ The folder `/analysis/` is responsible for the construction of all the figures, plots, and scalars used in the paper using the `final_data_sample.dta` dataset provided in the separate archive. The replicator will be able to run all scripts in this folder. The whole analysis takes around 40 minutes to run on a computer with 4 cores and 16GB of memory. Most files within `analysis` take less than 5 minutes to run; however, the file `analysis/structural/code/StructuralModel.R` takes around 20 minutes.
+
+ #### Summary
+ Approximate time needed to reproduce the analyses on a standard (2022) desktop machine is <1 hour.
+
+ #### Details
+ The `analysis` code was last run on a **4-core Intel-based laptop with macOS version 10.15.5**.
+
+ The `data` code was last run on **an Intel server with 20 CPUs and 12GB of memory per CPU**. Computation took 60 hours.
+
+ ## Description of programs/code
+ In this replication archive:
+ - The folder `/data/source/` is responsible for all the data processing of our Phone Dashboard application and our surveys.
+ The subfolders `/data/source/build_master/`, `/data/source/clean_master/`, and `/data/source/exporters/` contain Python files that define the classes and auxiliary functions called in the main script `/data/run.py`. This main script generates the master files gathering all information at the user level or at the user-app level.
+
+ - The folder `/data/temptation/` is responsible for cleaning the master files produced as output of `/data/source/`.
+ It outputs the anonymized dataset `final_data_sample.dta`, which contains all the information at the user level. This dataset is used throughout the analysis of the paper.
+
+ - The folder `/analysis/` contains all the programs generating the tables, figures, and scalars in the paper. The programs in the `/analysis/` folder have been categorised under three subfolders:
+
+ 1. `/analysis/descriptive/` produces tables and charts of descriptive statistics. It contains the below programs:
+ * `code/CommitmentDemand.do` (willingness-to-pay and limit tightness plots)
+ * `code/COVIDResponse.do` (survey stats on response to COVID)
+ * `code/DataDescriptives.do` (sample demographics and attrition tables)
+ * `code/HeatmapPlots.R` (predicted vs. actual FITSBY usage)
+ * `code/QualitativeEvidence.do` (descriptive plots for addiction scale, interest in bonus/limit)
+ * `code/SampleStatistics.do` (statistics about completion rates for the study)
+ * `code/Scalars.do` (statistics about MPL and ideal usage reduction)
+ * `code/Temptation.do` (plots desired usage change for various tempting activities)
+
+ 2. `/analysis/structural/` estimates parameters and generates plots for our structural model. It contains the below program:
+ * `code/StructuralModel.R`
+
+ 3. `/analysis/treatment_effects/` produces model-free estimates of treatment effects. It contains the below programs:
+ * `code/Beliefs.do` (compares actual treatment effect with predicted treatment effect)
+ * `code/CommitmentResponse.do` (plots how the treatment effect differs by SMS addiction scale and other survey indicators)
+ * `code/FDRTable.do` (estimates how the treatment effect differs by SMS addiction scale and other indicators, adjusted for the false-discovery rate; also plots some descriptive statistics)
+ * `code/HabitFormation.do` (compares actual and predicted usage)
+ * `code/Heterogeneity.do` (plots heterogeneous treatment effects)
+ * `code/HeterogeneityInstrumental.do` (plots heterogeneous treatment effects)
+ * `code/ModelHeterogeneity.R` (generates other heterogeneity plots and some temptation plots)
+ * `code/SurveyValidation.do` (plots the effect of rewarding accurate usage prediction on usage prediction accuracy)
+
+ Most of the programs in the analysis folder rely on the dataset `final_data_sample.dta`. However, some programs further require the datasets `final_data.dta` and `AnalysisUser.dta` to compute certain scalars mentioned in the paper. These programs are `/analysis/descriptive/code/DataDescriptives.do`, `/analysis/descriptive/code/SampleStatistics.do`, `/analysis/descriptive/code/Scalars.do`, and `/analysis/treatment_effects/code/ModelHeterogeneity.R`. Since these two datasets are not provided with the replication archive for confidentiality reasons, the portions of the code requiring them have been commented out in the relevant programs.
+
+ - The folder `/lib/` contains auxiliary functions and helpers.
+ - The folder `/paper_slides/` contains all the inputs and files necessary to compile the paper. The subfolder `/paper_slides/figures/` contains screenshots and other figures that are not derived from programs. The folder also contains the paper LyX file, the bibliography, and the `motivation_correlation.lyx` LyX table.
+ - The folder `setup` contains files to set up the conda environment and to install the R, Python, and Stata dependencies.
+ - The folder `experiment_design` contains the questionnaires for our surveys as well as screenshots from the Phone Dashboard application.
+ - The folder `/docs/` contains additional documents to guide the replicator. The file `docs/DescriptionOfSteps.pdf` gives a high-level overview of the steps involved in the data processing, from our application Phone Dashboard to the analysis in the paper. It splits the data processing into three steps:
+ 1) Processing the raw data from Phone Dashboard (done by the `/data/source/` folder)
+ 2) Cleaning the original data from Phone Dashboard (done by the `/data/temptation/` folder)
+ 3) Analyzing the anonymized data (done by the `/analysis/` folder)
+ Since the data inputs for steps 1 and 2 are not provided with this replication archive, we include a further document, `docs/Step1_Step2_DAG.pdf`, that illustrates how we carried them out internally via a directed acyclic graph. Finally, the file `docs/MappingsTablesAndFigures.pdf` provides a mapping of all the tables and figures to their corresponding program.
+
+ Note that the modules or portions of programs that cannot be run due to unshared data have been commented out in the relevant main run scripts.
+
+ ### License for code
+
+ All code is available under an MIT License. Please refer to the document `LICENSE-code.md` at the root of the repository.
+
+ ## Instructions to replicators
+
+ ### Setup
+
+ 1. Create a `config_user.yaml` file in the root directory. A template can be found in the `setup` subdirectory. See the **User Configuration** section below for further detail. If you do not have any external paths you wish to specify and wish to use the default executable names, you can skip this step and the default `config_user.yaml` will be copied over in step 4.
+
+ 2. If you already have conda set up on your local machine, feel free to skip this step. If not, this will install a lightweight version of conda that will not interfere with your current Python and R installations.
+ Install miniconda and the JDK, which are used to manage the R/Python virtual environment, if you have not already done so. You can install these programs from their websites: [here for miniconda](https://docs.conda.io/en/latest/miniconda.html) and [here for the JDK](https://www.oracle.com/java/technologies/javase-downloads.html). If you use Homebrew (which can be downloaded [here](https://brew.sh/)), these two programs can be installed as follows:
+ ```
+ brew install --cask miniconda
+ brew install --cask oracle-jdk
+ ```
+ Once you have done this, you need to initialize conda by running the following lines and restarting your terminal:
+ ```
+ conda config --set auto_activate_base false
+ conda init $(echo $0 | cut -d'-' -f 2)
+ ```
+
+ 3. Create the conda environment with the command:
+ ```
+ conda env create -f setup/conda_env.yaml
+ ```
+
+ 4. Run the `check_setup.py` file. One way to do this is to run the following bash command in a terminal from the `setup` subdirectory:
+ ```
+ python3 check_setup.py
+ ```
+
+ 5. Install R dependencies that cannot be managed using conda with the `setup_r.r` file. One way to do this is to run the following bash command in a terminal from the `setup` subdirectory:
+ ```
+ Rscript setup_r.r
+ ```
+
+ ### Usage
+
+ Once you have successfully completed the **Setup** section above, each time you run any analysis, make sure the virtual environment associated with this project is activated, using the command below (replacing `PROJECT_NAME` with the name of this project).
+ ```
+ conda activate PROJECT_NAME
+ ```
+ If you wish to return to your base installation of Python and R, you can easily deactivate this virtual environment using the command below:
+ ```
+ conda deactivate
+ ```
+
+ ### Adding Packages
+ #### Python
+ Add any required packages to `setup/conda_env.yaml`. If possible, add the package version number. If there is a package that is not available from `conda`, add it to the `pip` section of the `yaml` file. To avoid re-running the entire environment setup, you can install these individual packages from `conda` with the command
+
+ ```
+ conda install -c conda-forge <PACKAGE>
+ ```
+
+ #### R
+ Add any required packages that are available via CRAN to `setup/conda_env.yaml`. These must be prepended with `r-`. If there is a package that is only available from GitHub and not from CRAN, add this package to `setup/setup_r.r`. These individual packages can be added in the same way as Python packages above (with the `r-` prefix).
+
+ #### Stata
+
+ Install Stata dependencies using `setup/download_stata_ado.do`. We keep all non-base Stata ado files in the `lib` subdirectory, so most non-base Stata ado files will be versioned. To add additional Stata dependencies, use the following bash command from the `setup` subdirectory:
+ ```
+ stata-mp -e download_stata_ado.do
+ ```
+
+ ### Build
+
+ 1. Follow the **Setup** instructions above.
+
+ 2. From the root of the repository, run the following bash command:
+ ```
+ python run_all.py
+ ```
+
+ ### Command Line Usage
+
+ For specific instructions on how to set up command line usage for an application, refer to the [RA manual](https://github.com/gentzkow/template/wiki/Command-Line-Usage).
+
+ By default, the repository assumes the following executable names for the following applications:
+
+ ```
+ application : executable
+ python      : python
+ lyx         : lyx
+ r           : Rscript
+ stata       : statamp (will need to be updated if using a version of Stata that is not Stata-MP)
+ ```
+
+ Default executable names can be updated in `config_user.yaml`. For further detail, see the **User Configuration** section below.
+
+ ## User Configuration
+ `config_user.yaml` contains settings and metadata, such as local paths, that are specific to an individual user and thus should not be committed to Git. For this repository, this includes local paths to [external dependencies](https://github.com/gentzkow/template/wiki/External-Dependencies) as well as executable names for locally installed software.
+
+ Required applications may be set up for command line usage on your computer with an executable name that differs from the default. If so, specify the correct executable name in `config_user.yaml`. This configuration step is explained further in the [RA manual](https://github.com/gentzkow/template/wiki/Repository-Structure#Configuration-Files).
+
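+ For illustration only (this helper is not part of the package, and the key name is hypothetical rather than the file's actual schema), a minimal sketch of reading an executable name from `config_user.yaml` with `pyyaml`:
+
+ ```
+ import yaml
+
+ # Load the user-specific configuration (not committed to Git).
+ with open("config_user.yaml") as f:
+     cfg = yaml.safe_load(f)
+
+ # Hypothetical key: look up the Stata executable, falling back to the documented default.
+ stata_exe = cfg.get("stata_executable", "statamp")
+ print(f"Stata will be invoked as: {stata_exe}")
+ ```
+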
+ ## Windows Differences
+ The instructions above are for Linux and Mac users. However, with just a handful of small tweaks, this repo can also work on Windows.
+
+ If you are using Windows, you may need to run certain bash commands in administrator mode due to permission errors. To do so, open your terminal by right-clicking and selecting `Run as administrator`. To turn administrator mode on permanently, refer to the [RA manual](https://github.com/gentzkow/template/wiki/Repository-Usage#Administrator-Mode).
+
+ The executable names are likely to differ on your computer if you are using Windows. Executable names for Windows will typically look like the following:
+
+ ```
+ application : executable
+ python      : python
+ lyx         : LyX#.# (where #.# refers to the version number)
+ r           : Rscript
+ stata       : StataMP-64 (will need to be updated if using a version of Stata that is not Stata-MP or 64-bit)
+ ```
+
+ To download additional `ado` files on Windows, you will likely have to adjust this bash command:
+ ```
+ stata_executable -e download_stata_ado.do
+ ```
+
+ `stata_executable` refers to the name of your Stata executable. For example, if your Stata executable were located at `C:\Program Files\Stata15\StataMP-64.exe`, you would use the following bash command:
+
+ ```
+ StataMP-64 -e download_stata_ado.do
+ ```
+
+ ## List of tables and programs
+ The file `docs/MappingsTablesAndFigures.pdf` provides a mapping of all the tables and figures to their corresponding program.
+
+ ## References
+ Allcott, Hunt, Matthew Gentzkow, and Lena Song. “Data for: Digital Addiction.” Harvard Dataverse, 2023. https://doi.org/10.7910/DVN/GN636M.
+
+ Allcott, Hunt, Matthew Gentzkow, and Lena Song. “Digital Addiction.” American Economic Review 112, no. 7 (July 2022): 2424–63. https://doi.org/10.1257/aer.20210867.
17/replication_package/code/README.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0accfd826dda5929fe257e41b3c25bde9551fa0ef29d46c999fa967245c811a0
+ size 92738
17/replication_package/code/analysis/descriptive/README.md ADDED
@@ -0,0 +1,21 @@
+ # README
+
+ This module produces tables and charts of descriptive statistics.
+
+ `/code/` contains the below files:
+
+ * CommitmentDemand.do (willingness-to-pay and limit tightness plots)
+
+ * COVIDResponse.do (survey stats on response to COVID)
+
+ * DataDescriptives.do (sample demographics and attrition tables)
+
+ * HeatmapPlots.R (predicted vs. actual FITSBY usage)
+
+ * QualitativeEvidence.do (descriptive plots for addiction scale, interest in bonus/limit)
+
+ * SampleStatistics.do (statistics about completion rates for the study)
+
+ * Scalars.do (statistics about MPL and ideal usage reduction)
+
+ * Temptation.do (plots desired usage change for various tempting activities)
17/replication_package/code/analysis/descriptive/code/COVIDResponse.do ADDED
@@ -0,0 +1,168 @@
+ // Baseline qualitative evidence
+
+ ***************
+ * Environment *
+ ***************
+
+ clear all
+ adopath + "input/lib/ado"
+ adopath + "input/lib/stata/ado"
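+ // Note: yaml, rename_but, and ciquartile used below are project-provided ado commands shipped under input/lib (see the top-level README), not base Stata.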
+
+ *********************
+ * Utility functions *
+ *********************
+
+ program define_constants
+     yaml read YAML using "input/config.yaml"
+ end
+
+ program define_plot_settings
+     global HIST_SETTINGS ///
+         xtitle(" " "Fraction of sample") ///
+         bcolor(maroon) graphregion(color(white)) ///
+         xsize(6.5) ysize(4.5)
+
+     global HIST_DISCRETE_SETTINGS ///
+         gap(50) ylabel(, valuelabel noticks angle(horizontal)) ///
+         $HIST_SETTINGS
+
+     global CISPIKE_SETTINGS ///
+         spikecolor(maroon gray) ///
+         cicolor(maroon gray)
+
+     global CISPIKE_VERTICAL_GRAPHOPTS ///
+         ylabel(#6) ///
+         xsize(6.5) ysize(4.5)
+
+     global CISPIKE_STACKED_GRAPHOPTS ///
+         row(2) ///
+         graphregion(color(white)) ///
+         xsize(5.5) ysize(8)
+ end
+
+ **********************
+ * Analysis functions *
+ **********************
+
+ program main
+     define_constants
+     define_plot_settings
+     import_data
+
+     plot_hist_covid
+     plot_cispike_covid
+ end
+
+ program import_data
+     use "input/final_data_sample.dta", clear
+ end
+
+ program plot_hist_covid
+     twoway hist S1_CovidChangesFreeTime, frac discrete horizontal ///
+         $HIST_DISCRETE_SETTINGS ///
+         ytitle("Change in free time" " ") ///
+         ylabel(1(1)7)
+
+     graph export "output/hist_covid.pdf", replace
+
+     recode S1_CovidChangeReason ///
+         (1 = 4 "Increased phone usage") ///
+         (2 = 4 "Increased phone usage") ///
+         (3 = 3 "No change") ///
+         (4 = 4 "Increased phone usage") ///
+         (5 = 2 "Decreased phone usage") ///
+         (6 = 1 "Other"), ///
+         gen(S1_CovidChangeReason_recode)
+
+     twoway hist S1_CovidChangeReason_recode, ///
+         frac discrete horizontal ///
+         $HIST_DISCRETE_SETTINGS ///
+         ytitle("Effect of COVID-19 on phone use" " ")
+
+     graph export "output/hist_covid_reason.pdf", replace
+ end
+
+ program plot_cispike_covid
+     * Preserve data
+     preserve
+
+     * Reshape data
+     keep UserID S1_PhoneUseChange* S1_LifeBetter*
+     rename S1_PhoneUseChange* S1_PhoneUseChange_*
+     rename S1_LifeBetter* S1_LifeBetter_*
+     rename_but, varlist(UserID) prefix(outcome)
+     reshape long outcome, i(UserID) j(measure) string
+
+     split measure, p("_")
+     drop measure measure1
+     rename (measure2 measure3) (measure time)
+     replace time = "2020" if time == ""
+
+     * Recode data
+     encode measure, generate(measure_encode)
+     encode time, generate(time_encode)
+
+     recode measure_encode ///
+         (1 = 1 "Phone use makes life better") ///
+         (2 = 2 "Ideal use change"), ///
+         gen(measure_recode)
+
+     recode time_encode ///
+         (1 = 1 "2019") ///
+         (2 = 2 "Now"), ///
+         gen(time_recode)
+
+     * Plot data
+     gen dummy = 1
+
+     ttest outcome if measure_encode == 1, by(time_recode)
+     local diff : display %9.3fc `r(mu_2)' - `r(mu_1)'
+     local diff = subinstr("`diff'", " ", "", .)
+     local se : display %9.3fc `r(se)'
+     local se = subinstr("`se'", " ", "", .)
+
+     ciquartile outcome if measure_encode == 1, ///
+         over1(dummy) over2(time_recode) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
+             ytitle("Phone use makes life better" " ") ///
+             ysc(r(-1)) ///
+             legend(off) ///
+             text(-0.75 0 "Difference in means = `diff' (`se')", place(e)))
+
+     graph save "output/cispike_covid_life.gph", replace
+
+     ttest outcome if measure_encode == 2, by(time_recode)
+     local diff : display %9.3fc `r(mu_2)' - `r(mu_1)'
+     local diff = subinstr("`diff'", " ", "", .)
+     local se : display %9.3fc `r(se)'
+     local se = subinstr("`se'", " ", "", .)
+
+     ciquartile outcome if measure_encode == 2, ///
+         over1(dummy) over2(time_recode) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
+             ytitle("Ideal use change" " ") ///
+             ysc(r(-40)) ///
+             legend(off) ///
+             text(-37.5 0 "Difference in means = `diff' (`se')", place(e)))
+
+     graph save "output/cispike_covid_ideal.gph", replace
+
+     graph combine ///
+         "output/cispike_covid_ideal.gph" ///
+         "output/cispike_covid_life.gph", ///
+         $CISPIKE_STACKED_GRAPHOPTS
+
+     graph export "output/cispike_covid.pdf", replace
+
+     * Restore data
+     restore
+ end
+
+ ***********
+ * Execute *
+ ***********
+
+ main
17/replication_package/code/analysis/descriptive/code/CommitmentDemand.do ADDED
@@ -0,0 +1,457 @@
+ // Demand for commitment, moderated by demand for flexibility
+
+ ***************
+ * Environment *
+ ***************
+
+ clear all
+ adopath + "input/lib/ado"
+ adopath + "input/lib/stata/ado"
+
+ *********************
+ * Utility functions *
+ *********************
+
+ program define_constants
+     yaml read YAML using "input/config.yaml"
+ end
+
+ program define_plot_settings
+     global HIST_SETTINGS ///
+         bcolor(maroon) graphregion(color(white)) ///
+         xsize(6.5) ysize(4.5)
+
+     global HIST_DISCRETE_SETTINGS ///
+         gap(50) xlabel(, valuelabel noticks) ///
+         $HIST_SETTINGS
+
+     global HIST_SNOOZE_SETTINGS ///
+         gap(50) ylabel(1(1)10, valuelabel noticks angle(horizontal) labsize(small)) ///
+         xtitle(" " "Fraction of sample") ///
+         $HIST_SETTINGS
+
+     global HIST_CONTINUOUS_SETTINGS ///
+         $HIST_SETTINGS
+
+     global CISPIKE_SETTINGS ///
+         spikecolor(maroon black gray) ///
+         cicolor(maroon black gray)
+
+     global CISPIKE_SETTINGS4 ///
+         spikecolor(maroon black gray navy) ///
+         cicolor(maroon black gray navy)
+
+     global CISPIKE_VERTICAL_LARGE_GRAPHOPTS ///
+         ylabel(#6) ///
+         xsize(8) ysize(4.5) ///
+         legend(cols(4))
+
+     global CISPIKE_VERTICAL_GRAPHOPTS ///
+         ylabel(#6) ///
+         xsize(6.5) ysize(4.5) ///
+         legend(cols(4))
+ end
+
+ **********************
+ * Analysis functions *
+ **********************
+
+ program main
+     define_constants
+     define_plot_settings
+     import_data
+
+     plot_midline_demand
+     plot_wtp_for_rsi
+     plot_wtp_for_limit
+     plot_wtp_for_limit_by_limit
+     plot_wtp_for_limit_by_bonus
+     plot_limit_tight
+     plot_limit_tight, fitsby
+     plot_limit_tight_by_limit
+     plot_limit_tight_by_limit, fitsby
+     plot_limit_tight_dist
+     plot_preferred_snooze
+     plot_motivation_by_reason
+     plot_motivation_bar
+ end
+
+ program import_data
+     use "input/final_data_sample.dta", clear
+ end
+
+ program plot_midline_demand
+     * Preserve data
+     preserve
+
+     * Reshape data
+     keep UserID S2_PredictUseInitialEarn S2_PredictUseBonusEarn S2_MPL
+     rename_but, varlist(UserID) prefix(dollar)
+     reshape long dollar, i(UserID) j(measure) string
+
+     * Recode data
+     encode measure, generate(measure_encode)
+
+     * Plot data
+     gen dummy = 1
+
+     cispike dollar, ///
+         over1(dummy) over2(measure_encode) ///
+         $CISPIKE_SETTINGS gap2(100) ///
+         graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
+             ytitle("Dollars" " ") ///
+             /// Labels too long for encode
+             xlabel(0.5 " " ///
+                 1 `" "Valuation of" "Bonus" "' ///
+                 3 `" "Expected earnings" "at predicted usage" "with Bonus" "' ///
+                 5 `" "Expected earnings" "at predicted usage" "without Bonus" "' ///
+                 5.5 " ") ///
+             legend(off))
+
+     graph export "output/cispike_midline_demand.pdf", replace
+
+     * Restore data
+     restore
+ end
+
+ program plot_wtp_for_rsi
+     hist S2_MPL, frac discrete ///
+         xtitle(" " "Valuation of bonus ($)") ///
+         ytitle("Fraction of sample" " ") ///
+         $HIST_DISCRETE_SETTINGS
+
+     graph export "output/hist_rsi_wtp.pdf", replace
+ end
+
+ program plot_wtp_for_limit
+     hist S3_MPLLimit, frac ///
+         xtitle(" " "Valuation of limit functionality ($)") ///
+         ytitle("Fraction of sample" " ") ///
+         $HIST_DISCRETE_SETTINGS
+
+     graph export "output/hist_limit_wtp.pdf", replace
+ end
+
+ program plot_wtp_for_limit_by_limit
+     * Preserve data
+     preserve
+
+     * Add average
+     tempfile temp
+     save `temp', replace
+     keep if inlist(S2_LimitType, 1, 2, 3, 4, 5)
+     replace S2_LimitType = 6
+     append using `temp'
+
+     * Recode data
+     recode S2_LimitType ///
+         (0 = .) ///
+         (1 = 2 "Snooze 0") ///
+         (2 = 3 "Snooze 2") ///
+         (3 = 4 "Snooze 5") ///
+         (4 = 5 "Snooze 20") ///
+         (5 = 6 "No snooze") ///
+         (6 = 1 "All limits"), ///
+         gen(S2_LimitType_recode)
+
+     * Plot data
+     gen dummy = 1
+
+     cispike S3_MPLLimit, ///
+         over1(dummy) over2(S2_LimitType_recode) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_LARGE_GRAPHOPTS ///
+             ytitle("Willingness-to-pay for limit (dollars)" " ") ///
+             legend(off))
+
+     graph export "output/cispike_limit_wtp.pdf", replace
+
+     * Restore data
+     restore
+ end
+
+ program plot_wtp_for_limit_by_bonus
+     * Preserve data
+     preserve
+
+     * Recode data
+     recode S3_Bonus ///
+         (0 = 0 "Control") ///
+         (1 = 1 "Bonus"), ///
+         gen(S3_Bonus_recode)
+
+     * Plot data
+     gen dummy = 1
+
+     cispike S3_MPLLimit, ///
+         over1(dummy) over2(S3_Bonus_recode) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_LARGE_GRAPHOPTS ///
+             ytitle("Willingness-to-pay for Limit (dollars)" " ") ///
+             legend(off))
+
+     graph export "output/cispike_limit_wtp_by_bonus.pdf", replace
+
+     * Restore data
+     restore
+ end
+
+ program plot_limit_tight
+     syntax, [fitsby]
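+     // The optional -fitsby- flag switches to the FITSBY limit-tightness variables and appends a _fitsby suffix to exported file names.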
201
+
202
+ if ("`fitsby'" == "fitsby") {
203
+ local fitsby "FITSBY"
204
+ local suffix "_fitsby"
205
+ }
206
+
207
+ else {
208
+ local fitsby ""
209
+ local suffix ""
210
+ }
211
+
212
+ * Preserve data
213
+ preserve
214
+
215
+ * Reshape data
216
+ keep UserID S2_LimitType *LimitTight`fitsby'
217
+ rename_but, varlist(UserID S2_LimitType) prefix(tight)
218
+ reshape long tight, i(UserID S2_LimitType) j(measure) string
219
+
220
+ * Recode data
221
+ sort measure
222
+ encode measure, generate(measure_encode)
223
+
224
+ recode measure_encode ///
225
+ (1 = 1 "Period 2") ///
226
+ (2 = 2 "Period 3") ///
227
+ (5 = 3 "Period 4") ///
228
+ (7 = 4 "Period 5") ///
229
+ (4 = 5 "Periods 3 & 4") ///
230
+ (3 = 6 "Periods 2 to 4") ///
231
+ (6 = 7 "Periods 2 to 5"), ///
232
+ gen(measure_recode)
233
+
234
+ * Plot data
235
+ gen dummy = 1
236
+
237
+ cispike tight if measure_recode <= 4, ///
238
+ over1(dummy) over2(measure_recode) ///
239
+ $CISPIKE_SETTINGS4 ///
240
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
241
+ ytitle("Limit tightness (minutes/day)" " ") ///
242
+ legend(off))
243
+
244
+ graph export "output/cispike_limit_tight`suffix'.pdf", replace
245
+
246
+ * Restore data
247
+ restore
248
+ end
249
+
250
+ program plot_limit_tight_by_limit
251
+ syntax, [fitsby]
252
+
253
+ if ("`fitsby'" == "fitsby") {
254
+ local fitsby "FITSBY"
255
+ local suffix "_fitsby"
256
+ }
257
+
258
+ else {
259
+ local fitsby ""
260
+ local suffix ""
261
+ }
262
+
263
+ * Preserve data
264
+ preserve
265
+
266
+ * Reshape data
267
+ keep UserID S2_LimitType *LimitTight`fitsby'
268
+ rename_but, varlist(UserID S2_LimitType) prefix(tight)
269
+ reshape long tight, i(UserID S2_LimitType) j(measure) string
270
+
271
+ * Recode data
272
+ sort measure
273
+ encode measure, generate(measure_encode)
274
+
275
+ recode measure_encode ///
276
+ (1 = 1 "Period 2") ///
277
+ (2 = 2 "Period 3") ///
278
+ (5 = 3 "Period 4") ///
279
+ (7 = 4 "Period 5") ///
280
+ (4 = 5 "Periods 3 & 4") ///
281
+ (3 = 6 "Periods 2 to 4") ///
282
+ (6 = 7 "Periods 2 to 5"), ///
283
+ gen(measure_recode)
284
+
285
+ recode S2_LimitType ///
286
+ (0 = .) ///
287
+ (1 = 1 "Snooze 0") ///
288
+ (2 = 2 "Snooze 2") ///
289
+ (3 = 3 "Snooze 5") ///
290
+ (4 = 4 "Snooze 20") ///
291
+ (5 = 5 "No snooze"), ///
292
+ gen(S2_LimitType_recode)
293
+
294
+ * Plot data (periods 2 to 5 pooled)
295
+ gen dummy = 1
296
+
297
+ cispike tight if measure_recode == 7, ///
298
+ over1(dummy) over2(S2_LimitType_recode) ///
299
+ $CISPIKE_SETTINGS ///
300
+ graphopts($CISPIKE_VERTICAL_LARGE_GRAPHOPTS ///
301
+ ytitle("Limit tightness (minutes/day)" " ") ///
302
+ xlabel(, labsize(medlarge)) xtitle(, size(medlarge)) ///
303
+ ylabel(, labsize(medlarge)) ytitle(, size(medlarge)) ///
304
+ legend(off))
305
+
306
+ graph export "output/cispike_limit_tight_combined_by_limit`suffix'.pdf", replace
307
+
308
+ * Plot data (by period)
309
+ cispike tight if measure_recode <= 4, ///
310
+ over1(measure_recode) over2(S2_LimitType_recode) ///
311
+ $CISPIKE_SETTINGS4 ///
312
+ graphopts($CISPIKE_VERTICAL_LARGE_GRAPHOPTS ///
313
+ ytitle("Limit tightness (minutes/day)" " ") ///
314
+ xlabel(, labsize(medlarge)) xtitle(, size(medlarge)) ///
315
+ ylabel(, labsize(medlarge)) ytitle(, size(medlarge)) ///
316
+ legend(size(medlarge)))
317
+
318
+ graph export "output/cispike_limit_tight_by_limit`suffix'.pdf", replace
319
+
320
+ * Restore data
321
+ restore
322
+ end
323
+
324
+ program plot_limit_tight_dist
325
+ * Preserve data
326
+ preserve
327
+
328
+ * Plot data (by period)
329
+ hist PD_P2_LimitTight, frac ///
330
+ xtitle(" " "Period 2 limit tightness (minutes/day)") ///
331
+ ytitle("Fraction of sample" " ") ///
332
+ $HIST_CONTINUOUS_SETTINGS
333
+
334
+ graph export "output/hist_limit_tight_p2.pdf", replace
335
+
336
+ * Plot data (all periods together)
337
+ hist PD_P5432_LimitTight, frac ///
338
+ xtitle(" " "Periods 2 to 5 limit tightness (minutes/day)") ///
339
+ ytitle("Fraction of sample" " ") ///
340
+ $HIST_CONTINUOUS_SETTINGS
341
+
342
+ graph export "output/hist_limit_tight.pdf", replace
343
+
344
+ * Reshape data
345
+ keep UserID PD_P2_LimitTight_*
346
+ drop *Other
347
+ reshape long PD_P2, i(UserID) j(measure) string
348
+
349
+ * Recode data
350
+ sort measure
351
+ encode measure, generate(measure_encode)
352
+
353
+ recode measure_encode ///
354
+ (2 = 1 "Facebook") ///
355
+ (3 = 2 "Instagram") ///
356
+ (5 = 3 "Twitter") ///
357
+ (4 = 4 "Snapchat") ///
358
+ (1 = 5 "Browser") ///
359
+ (6 = 6 "YouTube"), ///
360
+ gen(measure_recode)
361
+
362
+ * Plot data (by app)
363
+ local app_1 "Facebook"
364
+ local app_2 "Instagram"
365
+ local app_3 "Twitter"
366
+ local app_4 "Snapchat"
367
+ local app_5 "Browser"
368
+ local app_6 "YouTube"
369
+
370
+ foreach num of numlist 1/6 {
371
+ hist PD_P2 if measure_recode == `num', frac /// measure_recode order matches the app_* labels above
372
+ xtitle(" " "Period 2 limit tightness for `app_`num'' (minutes/day)") ///
373
+ ytitle("Fraction of sample" " ") ///
374
+ $HIST_CONTINUOUS_SETTINGS ///
375
+ xlabel(, labsize(large)) xtitle(, size(large)) ///
376
+ ylabel(, labsize(large)) ytitle(, size(large)) ///
377
+ legend(size(large))
378
+
379
+ graph export "output/hist_limit_tight_`num'.pdf", replace
380
+ }
381
+
382
+ * Restore data
383
+ restore
384
+ end
385
+
386
+ program plot_preferred_snooze
387
+ recode S4_PreferredSnooze ///
388
+ (1 = 1 "No delay") ///
389
+ (2 = 2 "1 minute") ///
390
+ (3 = 3 "2 minutes") ///
391
+ (4 = 4 "3-4 minutes") ///
392
+ (5 = 5 "5 minutes") ///
393
+ (6 = 6 "10 minutes") ///
394
+ (7 = 7 "20 minutes") ///
395
+ (8 = 8 "30 minutes+") ///
396
+ (9 = 9 "Prefer no snooze") ///
397
+ (10 = 10 "Does not matter"), ///
398
+ gen(S4_PreferredSnooze_short_names)
399
+
400
+ twoway hist S4_PreferredSnooze_short_names, ///
401
+ frac discrete horizontal ///
402
+ $HIST_SNOOZE_SETTINGS ///
403
+ ytitle("Preferred Snooze Length (minutes)" " ")
404
+
405
+
406
+ graph export "output/hist_preferred_snooze.pdf", replace
407
+ end
408
+
409
+ program plot_motivation_by_reason
410
+ preserve
411
+ * Plot data
412
+ gen dummy = 1
413
+
414
+ cispike S2_Motivation, ///
415
+ over1(dummy) over2(S2_MPLReasoning) ///
416
+ $CISPIKE_SETTINGS4 gap2(100) ///
417
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
418
+ ytitle("Behavior change premium ($)" " ") ///
419
+ /// Labels too long for encode
420
+ xlabel(0.5 " " ///
421
+ 1 `" "Only wanted" "to maximize" "earnings" "' ///
422
+ 3 `" "Wanted incentive" "to use phone" "less" "' ///
423
+ 5 `" "Don't want pressure" "to use phone" "less" "' ///
424
+ 7 `" "Other" "' ///
425
+ 7.5 " ") ///
426
+ legend(off))
427
+
428
+ graph export "output/cispike_motivation_reason.pdf", replace
429
+
430
+ * Restore data
431
+ restore
432
+ end
433
+
434
+ program plot_motivation_bar
435
+ preserve
436
+ * Plot data
437
+ twoway hist S2_MPLReasoning, frac discrete ///
438
+ $HIST_DISCRETE_SETTINGS ///
439
+ xlabel(1 `" "Only wanted" "to maximize" "earnings" "' ///
440
+ 2 `" "Wanted incentive" "to use phone" "less" "' ///
441
+ 3 `" "Don't want pressure" "to use phone" "less" "' ///
442
+ 4 `" "Other" "') ///
443
+ ytitle("Fraction of sample" " ") ///
444
+ xtitle("")
445
+
446
+
447
+ graph export "output/hist_motivation_mpl.pdf", replace
448
+
449
+ * Restore data
450
+ restore
451
+ end
452
+
453
+ ***********
454
+ * Execute *
455
+ ***********
456
+
457
+ main
17/replication_package/code/analysis/descriptive/code/DataDescriptives.do ADDED
@@ -0,0 +1,668 @@
1
+ // Description of data
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ end
18
+
19
+ program define_settings
20
+ global DESCRIPTIVE_TAB ///
21
+ collabels(none) nodepvars noobs replace
22
+
23
+ global DESCRIPTIVE_TAB_DETAILED ///
24
+ nomtitle nonumbers noobs compress label replace ///
25
+ cells((mean(fmt(%8.1fc)) ///
26
+ sd(fmt(%8.1fc)) ///
27
+ min(fmt(%8.0fc)) ///
28
+ max(fmt(%8.0fc)))) ///
29
+ collabels("\shortstack{Mean}" ///
30
+ "\shortstack{Standard\\deviation}" ///
31
+ "\shortstack{Minimum\\value}" ///
32
+ "\shortstack{Maximum\\value}") ///
33
+
34
+ global BALANCE_TAB ///
35
+ order(1 0) grplabels(1 Treatment @ 0 Control) ///
36
+ pftest pttest ftest fmissok vce(robust) stdev ///
37
+ rowvarlabel onenrow tblnonote format(%8.2fc) replace
38
+
39
+ global HIST_CONTINUOUS_SETTINGS ///
40
+ bcolor(maroon) graphregion(color(white)) ///
41
+ xsize(6.5) ysize(4)
42
+
43
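+ // Note (assumption): the extra ")" at the start of BAR_SETTINGS appears
+ // intended to close the legend() option left open at each call site
+ // (share_use_by_app, addiction_plot)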
+ global BAR_SETTINGS ///
44
+ region(lcolor(white))) graphregion(color(white)) ///
45
+ xsize(6.5) ysize(4)
46
+ end
47
+
48
+ **********************
49
+ * Analysis functions *
50
+ **********************
51
+
52
+ program main
53
+ define_constants
54
+ define_settings
55
+ import_data
56
+ clean_data
57
+
58
+ sample_demographics_balance_all
59
+ sample_demographics
60
+ sample_demographics_balance
61
+ * limit_attrition
62
+ * bonus_attrition
63
+ balance
64
+ historical_use
65
+ historical_use, fitsby
66
+ summary_welfare
67
+ share_use_by_app
68
+ addiction_plot
69
+ end
70
+
71
+ program import_data
72
+ use "input/final_data_sample.dta", clear
73
+
74
+ foreach time in S3 S4 {
75
+ replace `time'_Finished = 0 if `time'_Finished == .
76
+ }
77
+ end
78
+
79
+ program clean_data
80
+ * Demographics
81
+ recode S1_Income ///
82
+ (1 = 5) ///
83
+ (2 = 15) ///
84
+ (3 = 25) ///
85
+ (4 = 35) ///
86
+ (5 = 45) ///
87
+ (6 = 55) ///
88
+ (7 = 67) ///
89
+ (8 = 87.5) ///
90
+ (9 = 112.5) ///
91
+ (10 = 137.5) ///
92
+ (11 = 150) ///
93
+ (12 = .), ///
94
+ gen(income)
95
+
96
+ gen college = (S1_Education >= 5)
97
+ gen male = (S0_Gender == 1)
98
+ gen white = (S1_Race == 5)
99
+
100
+ * Limit treatment
101
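+ // S2_LimitType: 0 = control, 1-5 = limit arms (snooze 0/2/5/20, no snooze)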
+ gen limit_T = 1 if S2_LimitType > 0 & S2_LimitType != .
102
+ replace limit_T = 0 if S2_LimitType == 0
103
+
104
+ * Labels
105
+ label var college "College"
106
+ label var male "Male"
107
+ label var white "White"
108
+
109
+ label var income "Income (\\$000s)"
110
+ label var S0_Age "Age"
111
+ label var PD_P1_UsageFITSBY "Period 1 FITSBY use (minutes/day)"
112
+ end
113
+
114
+ program sample_demographics
115
+ local varset income college male white S0_Age PD_P1_Usage PD_P1_UsageFITSBY
116
+
117
+ * Sample demographics
118
+ estpost tabstat `varset', statistics(mean) columns(statistics)
119
+ est store sample_col
120
+
121
+ * Preserve data
122
+ preserve
123
+
124
+ * US demographics
125
+ replace income = 43.01
126
+ replace college = 0.3009
127
+ replace male = 0.4867
128
+ replace white = 0.73581
129
+ replace S0_Age = 47.6
130
+ replace PD_P1_Usage = .
131
+ replace PD_P1_UsageFITSBY = .
132
+
133
+ estpost tabstat `varset', statistics(mean) columns(statistics)
134
+ est store us_col
135
+
136
+ * Restore data
137
+ restore
138
+
139
+ * Export table
140
+ esttab sample_col us_col using "output/sample_demographics.tex", ///
141
+ mtitle("\shortstack{Analysis\\sample}" ///
142
+ "\shortstack{U.S.\\adults}") ///
143
+ coeflabels(income "Income (\\$000s)" ///
144
+ college "College" ///
145
+ male "Male" ///
146
+ white "White" ///
147
+ S0_Age "Age" ///
148
+ PD_P1_Usage "Period 1 phone use (minutes/day)" ///
149
+ PD_P1_UsageFITSBY "Period 1 FITSBY use (minutes/day)") ///
150
+ $DESCRIPTIVE_TAB ///
151
+ cells(mean(fmt(%9.1fc %9.2fc %9.2fc %9.2fc %9.1fc %9.1fc %9.1fc)))
152
+
153
+ est clear
154
+ end
155
+
156
+ program sample_demographics_balance
157
+ local varset balance_income balance_college balance_male balance_white balance_age ///
158
+ PD_P1_Usage PD_P1_UsageFITSBY
159
+
160
+ * Sample demographics
161
+ estpost tabstat `varset', statistics(mean) columns(statistics)
162
+ est store sample_col
163
+
164
+ * Preserve data
165
+ preserve
166
+
167
+ local income 43.01
168
+ local college 0.3009
169
+ local male 0.4867
170
+ local white 0.73581
171
+ local age 47.6
172
+
173
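+ // Entropy balancing (ebalance, SSC): reweight the sample so covariate
+ // means match the U.S. adult targets above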
+ ebalance balance_income balance_college balance_male balance_white balance_age, ///
174
+ manualtargets(`income' `college' `male' `white' `age') generate(weight)
175
+
176
+ * Winsorize weights
177
+ gen weight2 = weight
178
+ replace weight2 = 2 if weight2 > 2
179
+ replace weight2 = 1/2 if weight2 < 1/2
180
+
181
+ estpost tabstat `varset' [weight=weight2], statistics(mean) columns(statistics)
182
+ est store sample_col_w2
183
+
184
+ * US demographics
185
+ replace balance_income = `income'
186
+ replace balance_college = `college'
187
+ replace balance_male = `male'
188
+ replace balance_white = `white'
189
+ replace balance_age = `age'
190
+ replace PD_P1_Usage = .
191
+ replace PD_P1_UsageFITSBY = .
192
+
193
+ estpost tabstat `varset', statistics(mean) columns(statistics)
194
+ est store us_col
195
+
196
+ * Restore data
197
+ restore
198
+
199
+ * Export table
200
+ esttab sample_col sample_col_w2 us_col ///
201
+ using "output/sample_demographics_balance.tex", ///
202
+ mtitle("\shortstack{Analysis\\sample}" ///
203
+ "\shortstack{Balanced\\sample}" ///
204
+ "\shortstack{U.S.\\adults}" ///
205
+ ) ///
206
+ coeflabels(balance_income "Income (\\$000s)" ///
207
+ balance_college "College" ///
208
+ balance_male "Male" ///
209
+ balance_white "White" ///
210
+ balance_age "Age" ///
211
+ PD_P1_Usage "Period 1 phone use (minutes/day)" ///
212
+ PD_P1_UsageFITSBY "Period 1 FITSBY use (minutes/day)") ///
213
+ $DESCRIPTIVE_TAB ///
214
+ cells(mean(fmt(%9.1fc %9.2fc %9.2fc %9.2fc %9.1fc %9.1fc %9.1fc)))
215
+
216
+ est clear
217
+ end
218
+
219
+ program sample_demographics_balance_all
220
+ local varset balance_income balance_college balance_male balance_white balance_age ///
221
+ PD_P1_Usage PD_P1_UsageFITSBY
222
+
223
+ * Sample demographics
224
+ estpost tabstat `varset', statistics(mean) columns(statistics)
225
+ est store sample_col
226
+
227
+ * Preserve data
228
+ preserve
229
+
230
+ local income 43.01
231
+ local college 0.3009
232
+ local male 0.4867
233
+ local white 0.73581
234
+ local age 47.6
235
+
236
+ ebalance balance_income balance_college balance_male balance_white balance_age, ///
237
+ manualtargets(`income' `college' `male' `white' `age') generate(weight)
238
+
239
+ * Winsorize weights
240
+ gen weight2 = weight
241
+ replace weight2 = 2 if weight2 > 2
242
+ replace weight2 = 1/2 if weight2 < 1/2
243
+
244
+ gen weight3 = weight
245
+ replace weight3 = 3 if weight3 > 3
246
+ replace weight3 = 1/3 if weight3 < 1/3
247
+
248
+ gen weight4 = weight
249
+ replace weight4 = 4 if weight4 > 4
250
+ replace weight4 = 1/4 if weight4 < 1/4
251
+
252
+ gen weight5 = weight
253
+ replace weight5 = 5 if weight5 > 5
254
+ replace weight5 = 1/5 if weight5 < 1/5
255
+
256
+ estpost tabstat `varset' [weight=weight2], statistics(mean) columns(statistics)
257
+ est store sample_col_w2
258
+
259
+ estpost tabstat `varset' [weight=weight3], statistics(mean) columns(statistics)
260
+ est store sample_col_w3
261
+
262
+ estpost tabstat `varset' [weight=weight4], statistics(mean) columns(statistics)
263
+ est store sample_col_w4
264
+
265
+ estpost tabstat `varset' [weight=weight5], statistics(mean) columns(statistics)
266
+ est store sample_col_w5
267
+
268
+ * US demographics
269
+ replace balance_income = `income'
270
+ replace balance_college = `college'
271
+ replace balance_male = `male'
272
+ replace balance_white = `white'
273
+ replace balance_age = `age'
274
+ replace PD_P1_Usage = .
275
+ replace PD_P1_UsageFITSBY = .
276
+
277
+ estpost tabstat `varset', statistics(mean) columns(statistics)
278
+ est store us_col
279
+
280
+ * Restore data
281
+ restore
282
+
283
+ * Export table
284
+ esttab us_col sample_col sample_col_w2 sample_col_w3 sample_col_w4 sample_col_w5 ///
285
+ using "output/sample_demographics_balance_all.tex", ///
286
+ mtitle("\shortstack{U.S.\\adults}" ///
287
+ "\shortstack{Analysis\\sample}" ///
288
+ "\shortstack{(w=2)}" ///
289
+ "\shortstack{(w=3)}" ///
290
+ "\shortstack{(w=4)}" ///
291
+ "\shortstack{(w=5)}" ///
292
+ ) ///
293
+ coeflabels(balance_income "Income (\\$000s)" ///
294
+ balance_college "College" ///
295
+ balance_male "Male" ///
296
+ balance_white "White" ///
297
+ balance_age "Age" ///
298
+ PD_P1_Usage "Period 1 use (min/day)" ///
299
+ PD_P1_UsageFITSBY "Period 1 FITSBY use (min/day)") ///
300
+ $DESCRIPTIVE_TAB ///
301
+ cells(mean(fmt(%9.1fc %9.2fc %9.2fc %9.2fc %9.1fc %9.1fc %9.1fc)))
302
+
303
+ est clear
304
+ end
305
+
306
+
307
+ program limit_attrition
308
+ local varset ///
309
+ S3_Finished ///
310
+ S4_Finished ///
311
+ I_P2_Usage ///
312
+ I_P3_Usage ///
313
+ I_P4_Usage ///
314
+ I_P5_Usage
315
+
316
+ * Preserve data
317
+ preserve
318
+
319
+ * Use old sample definition
320
+ use "input/final_data.dta", clear
321
+ keep if S2_RevealConfirm == 1 & S3_Bonus <= 1
322
+ foreach time in S3 S4 {
323
+ replace `time'_Finished = 0 if `time'_Finished == .
324
+ }
325
+
326
+ * Create usage indicators
327
+ foreach time in P2 P3 P4 P5 {
328
+ gen I_`time'_Usage = 0
329
+ replace I_`time'_Usage = 1 if PD_`time'_Usage != .
330
+ }
331
+
332
+ * Attrition by limit group
333
+ forvalues i = 0/5 {
334
+ local if if S2_LimitType == `i'
335
+ estpost tabstat `varset' `if', statistics(mean) columns(statistics)
336
+ est store attrition_b`i'
337
+ }
338
+
339
+ * Attrition for limit groups
340
+ local if if S2_LimitType != 0
341
+ estpost tabstat `varset' `if', statistics(mean) columns(statistics)
342
+ est store attrition_b
343
+
344
+ * F-test for limit groups
345
+ foreach var of varlist `varset' {
346
+ reg `var' i.S2_LimitType
347
+ local pvalue = Ftail(e(df_m), e(df_r), e(F)) // upper-tail p-value of the joint F-test
348
+ replace `var' = `pvalue'
349
+ }
350
+ estpost tabstat `varset', statistics(mean) columns(statistics)
351
+ est store fval_b
352
+
353
+ * Export limit attrition table
354
+ esttab attrition_b0 attrition_b ///
355
+ attrition_b1 attrition_b2 ///
356
+ attrition_b3 attrition_b4 ///
357
+ attrition_b5 fval_b ///
358
+ using "output/attrition_limit.tex", ///
359
+ mtitle("\shortstack{Control}" ///
360
+ "\shortstack{All\\limits}" ///
361
+ "\shortstack{Snooze\\0}" ///
362
+ "\shortstack{Snooze\\2}" ///
363
+ "\shortstack{Snooze\\5}" ///
364
+ "\shortstack{Snooze\\20}" ///
365
+ "\shortstack{No\\snooze}" ///
366
+ "\shortstack{F-test\\p-value}") ///
367
+ coeflabels(S3_Finished "Completed survey 3" ///
368
+ S4_Finished "Completed survey 4" ///
369
+ I_P2_Usage "Have period 2 usage" ///
370
+ I_P3_Usage "Have period 3 usage" ///
371
+ I_P4_Usage "Have period 4 usage" ///
372
+ I_P5_Usage "Have period 5 usage") ///
373
+ $DESCRIPTIVE_TAB ///
374
+ cells(mean(fmt(%9.2fc)))
375
+
376
+ est clear
377
+
378
+ * Restore data
379
+ restore
380
+ end
381
+
382
+ program bonus_attrition
383
+ local varset ///
384
+ S3_Finished ///
385
+ S4_Finished ///
386
+ I_P2_Usage ///
387
+ I_P3_Usage ///
388
+ I_P4_Usage ///
389
+ I_P5_Usage
390
+
391
+ * Preserve data
392
+ preserve
393
+
394
+ * Use old sample definition
395
+ use "input/final_data.dta", clear
396
+ keep if S2_RevealConfirm == 1 & S3_Bonus <= 1
397
+ foreach time in S3 S4 {
398
+ replace `time'_Finished = 0 if `time'_Finished == .
399
+ }
400
+
401
+ keep if S3_Bonus != 2
402
+
403
+ * Create usage indicators
404
+ foreach time in P2 P3 P4 P5 {
405
+ gen I_`time'_Usage = 0
406
+ replace I_`time'_Usage = 1 if PD_`time'_Usage != .
407
+ }
408
+
409
+ * Attrition by bonus group
410
+ forvalues i = 0 / 1 {
411
+ local if if S3_Bonus == `i'
412
+ estpost tabstat `varset' `if', statistics(mean) columns(statistics)
413
+ est store attrition_bonus`i'
414
+ }
415
+
416
+ * T-test for bonus groups
417
+ foreach var of varlist `varset' {
418
+ capture prtest `var', by(S3_Bonus)
419
+
420
+ if _rc == 0 {
421
+ local diff = -1 * r(P_diff)
422
+ local pval = r(p)
423
+ gen `var'_d = `diff'
424
+ gen `var'_p = `pval'
425
+ }
426
+ else {
427
+ gen `var'_d = 0
428
+ gen `var'_p = .
429
+ }
430
+ }
431
+
432
+ * Append bonus differences
433
+ foreach var of varlist `varset' {
434
+ replace `var' = `var'_d
435
+ }
436
+ estpost tabstat `varset', statistics(mean) columns(statistics)
437
+ est store diff_bonus
438
+
439
+ * Append bonus p-values
440
+ foreach var of varlist `varset' {
441
+ replace `var' = `var'_p
442
+ }
443
+ estpost tabstat `varset', statistics(mean) columns(statistics)
444
+ est store pval_bonus
445
+
446
+ display("here")
447
+
448
+ * Export Bonus attrition table
449
+ esttab attrition_bonus0 attrition_bonus1 pval_bonus using "output/attrition_bonus.tex", ///
450
+ mtitle("\shortstack{Control}" ///
451
+ "\shortstack{Treatment}" ///
452
+ "\shortstack{t-test\\p-value}") ///
453
+ coeflabels(S3_Finished "Completed survey 3" ///
454
+ S4_Finished "Completed survey 4" ///
455
+ I_P2_Usage "Have period 2 usage" ///
456
+ I_P3_Usage "Have period 3 usage" ///
457
+ I_P4_Usage "Have period 4 usage" ///
458
+ I_P5_Usage "Have period 5 usage") ///
459
+ $DESCRIPTIVE_TAB ///
460
+ cells(mean(fmt(%9.2fc)))
461
+
462
+ est clear
463
+
464
+ * Restore data
465
+ restore
466
+ end
467
+
468
+ program balance
469
+ local varset income college male white S0_Age PD_P1_UsageFITSBY
470
+
471
+ iebaltab_edit `varset', ///
472
+ grpvar(limit_T) ///
473
+ savetex("output/balance_limit.tex") ///
474
+ $BALANCE_TAB
475
+
476
+ iebaltab_edit `varset', ///
477
+ grpvar(S3_Bonus) ///
478
+ savetex("output/balance_bonus.tex") ///
479
+ $BALANCE_TAB
480
+
481
+ * panelcombine, ///
482
+ * use(output/balance_limit.tex ///
483
+ * output/balance_bonus.tex) ///
484
+ * paneltitles("Limit Treatment" ///
485
+ * "Bonus Treatment") ///
486
+ * columncount(4) ///
487
+ * save("output/balance.tex") cleanup
488
+ end
489
+
490
+ program historical_use
491
+ syntax, [fitsby]
492
+
493
+ if ("`fitsby'" == "fitsby") {
494
+ local fitsby "FITSBY"
495
+ local suffix "_fitsby"
496
+ local word "FITSBY"
497
+ }
498
+
499
+ else {
500
+ local fitsby ""
501
+ local suffix ""
502
+ local word "phone"
503
+ }
504
+
505
+ local var PD_P1_Usage`fitsby'
506
+ label var PD_P1_Usage`fitsby' "Period 1 `word' use (minutes/day)"
507
+
508
+ local label : var label `var'
509
+ sum `var', d
510
+
511
+ twoway histogram `var', frac ///
512
+ ytitle("Fraction of sample" " ") ///
513
+ xtitle(" " "`label'") ///
514
+ $HIST_CONTINUOUS_SETTINGS
515
+
516
+ graph export "output/hist_baseline_usage`suffix'.pdf", replace
517
+ end
518
+
519
+ program summary_welfare
520
+ local varset ///
521
+ S1_PhoneUseChange ///
522
+ S1_AddictionIndex ///
523
+ S1_SMSIndex ///
524
+ S1_LifeBetter ///
525
+ S1_SWBIndex
526
+
527
+
528
+ estpost tabstat `varset', ///
529
+ statistics(mean, sd, max, min) columns(statistics)
530
+
531
+ est store baseline
532
+
533
+ esttab baseline using "output/baseline_welfare.tex", ///
534
+ $DESCRIPTIVE_TAB_DETAILED ///
535
+ coeflabels(S1_PhoneUseChange "Ideal use change" ///
536
+ S1_AddictionIndex "Addiction scale x (-1)" ///
537
+ S1_SMSIndex "SMS addiction scale x (-1)" ///
538
+ S1_LifeBetter "Phone makes life better" ///
539
+ S1_SWBIndex "Subjective well-being")
540
+ end
541
+
542
+ program share_use_by_app
543
+ * Preserve data
544
+ preserve
545
+
546
+ * Reshape data
547
+ keep UserID PD_P1_Usage_* PD_P1_Installed_*
548
+ drop *Other *_H*
549
+ reshape long PD_P1_Usage_ PD_P1_Installed_ , i(UserID) j(app) s
550
+ replace PD_P1_Usage_ = 0 if PD_P1_Usage_ == .
551
+
552
+ * Collapse data
553
+ collapse (mean) PD_P1_Usage_ PD_P1_Installed_, by(app)
554
+ gsort -PD_P1_Usage_
555
+ gen order = _n
556
+
557
+ cap drop appname1 appname2
558
+ gen appname1 = _n - 0.2
559
+ gen appname2 = _n + 0.2
560
+
561
+ local N = _N
562
+ forvalues i = 1/`N' {
563
+ local t`i' = app[`i']
564
+ }
565
+
566
+ * Plot data
567
+ twoway bar PD_P1_Installed_ appname1, ///
568
+ fintensity(inten50) barw(0.35) ///
569
+ yaxis(1) yscale(axis(1) range(0)) ylabel(0(0.2)1, axis(1)) ///
570
+ xlabel(1 "`t1'" 2 "`t2'" 3 "`t3'" 4 "`t4'" 5 "`t5'" ///
571
+ 6 "`t6'" 7 "`t7'" 8 "`t8'" 9 "`t9'" 10 "`t10'" ///
572
+ 11 "`t11'" 12 "`t12'" 13 "`t13'" 14 "`t14'", ///
573
+ valuelabel angle(45)) || ///
574
+ bar PD_P1_Usage_ appname2, ///
575
+ fintensity(inten100) barw(0.35) ///
576
+ yaxis(2) yscale(axis(2) range(0)) ylabel(#5, axis(2)) ///
577
+ xtitle("") ytitle("Share of users", axis(1)) ytitle("Minutes/day", axis(2)) ///
578
+ legend(label(1 "Users at baseline") ///
579
+ label(2 "Period 1 use") ///
580
+ $BAR_SETTINGS
581
+
582
+ graph export "output/bar_share_use_by_app.pdf", replace
583
+
584
+ * Restore data
585
+ restore
586
+ end
587
+
588
+ program addiction_plot
589
+ * Preserve data
590
+ preserve
591
+
592
+ * Reshape data
593
+ keep UserID *_Addiction_*
594
+ keep if S3_Addiction_1 != .
595
+
596
+ foreach i in 3 {
597
+ forvalues j = 1/16 {
598
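+ // Caution: Stata treats missing as +infinity, so a missing item is
+ // coded 1 here; the keep-if above only screens on item 1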
+ gen S`i'_Addiction_Binary_`j' = S`i'_Addiction_`j' > 0.5
599
+
600
+ }
601
+ }
602
+
603
+ keep UserID S3_Addiction_Binary_*
604
+
605
+ reshape long S3_Addiction_Binary_ , i(UserID) j(question)
606
+
607
+ rename S3_Addiction_Binary_ S3_Addiction
608
+
609
+ * Collapse data
610
+ collapse (mean) S3_Addiction , by(question)
611
+
612
+ gen order = _n
613
+
614
+ cap drop qname
615
+ gen qname = _N - _n + 1
616
+
617
+ gen category = qname < 9
618
+
619
+
620
+ * Plot data
621
+ twoway bar S3_Addiction qname, ///
622
+ fintensity(inten100) barw(0.6) bcolor(maroon) ///
623
+ yaxis(1) yscale(axis(1) range(0)) xlabel(0(0.2)1, axis(1)) ///
624
+ ylabel(1 "Procrastinate by using phone" 2 "Prefer phone to human interaction" ///
625
+ 3 "Lose sleep from use" 4 "Harms school/work performance" ///
626
+ 5 "Annoyed at interruption in use" 6 "Difficult to put down phone" ///
627
+ 7 "Feel anxious without phone" 8 "Others are concerned about use" ///
628
+ 9 "Try and fail to reduce use" 10 "Use to relax to go to sleep" ///
629
+ 11 "Use to distract from anxiety/etc." 12 "Use to distract from personal issues" ///
630
+ 13 "Tell yourself just a few more minutes" 14 "Use longer than intended" ///
631
+ 15 "Wake up, check phone immediately" 16 "Fear missing out online", ///
632
+ valuelabel angle(0)) horizontal ///
633
+ ytitle(" relapse, withdrawal, conflict salience, tolerance, mood", size(small)) ///
634
+ xtitle(`"Share of people who "often" or "always""', axis(1)) ///
635
+ legend(label(1 "Survey 3") ///
636
+ $BAR_SETTINGS
637
+
638
+ graph export "output/addiction.pdf", replace
639
+
640
+
641
+ * Plot data
642
+ twoway bar S3_Addiction qname, ///
643
+ fintensity(inten100) barw(0.75) bcolor(maroon) ///
644
+ yaxis(1) yscale(axis(1) range(0)) xlabel(0(0.2)0.8, axis(1)) ///
645
+ xlabel(, labsize(large)) ///
646
+ ylabel(1 "Procrastinate by using phone" 2 "Prefer phone to human interaction" ///
647
+ 3 "Lose sleep from use" 4 "Harms school/work performance" ///
648
+ 5 "Annoyed at interruption in use" 6 "Difficult to put down phone" ///
649
+ 7 "Feel anxious without phone" 8 "Others are concerned about use" ///
650
+ 9 "Try and fail to reduce use" 10 "Use to relax to go to sleep" ///
651
+ 11 "Use to distract from anxiety/etc." 12 "Use to distract from personal issues" ///
652
+ 13 "Tell yourself just a few more minutes" 14 "Use longer than intended" ///
653
+ 15 "Wake up, check phone immediately" 16 "Fear missing out online", ///
654
+ valuelabel angle(0) labsize(large)) horizontal ///
655
+ ytitle(, size(zero)) ///
656
+ xtitle(`"Share of people who "often" or "always""', axis(1) justification(right) size(large)) ///
657
+ legend(label(1 "Survey 3") ///
658
+ region(lcolor(white))) graphregion(color(white)) ///
659
+ xsize(6.5) ysize(4.5)
660
+
661
+ graph export "output/addiction_large.pdf", replace
662
+ end
663
+
664
+ ***********
665
+ * Execute *
666
+ ***********
667
+
668
+ main
17/replication_package/code/analysis/descriptive/code/HeatmapPlots.R ADDED
@@ -0,0 +1,144 @@
1
+ library(ggplot2)
2
+ library(tidyverse)
3
+ library(haven)
+ library(magrittr) # provides the %<>% assignment pipe used below
4
+
5
+ maroon <- '#94343c'
6
+ grey <- '#848484'
7
+
8
+ low_grey <- "grey90"
9
+
10
+ plot_wtp_prediction <- function(df){
11
+
12
+ # Tally the bins. Create bins centered at 5, 15, 25, etc.
13
+ counted <- df %>%
14
+ mutate(S2_PredictUseBonusEarnBin = S2_PredictUseBonusEarn - (S2_PredictUseBonusEarn %% 10) + 5) %>%
15
+ select(UserID, S2_PredictUseBonusEarnBin, S2_MPL) %>%
16
+ group_by(S2_MPL, S2_PredictUseBonusEarnBin) %>%
17
+ count(name="Count")
18
+
19
+ # Create an empty dataframe of all of the index combinations
20
+ mpls <- unique(counted$S2_MPL)
21
+ pred <- unique(counted$S2_PredictUseBonusEarnBin)
22
+
23
+ S2_MPL <- rep(mpls, length(pred))
24
+ S2_PredictUseBonusEarnBin <- rep(pred, each=length(mpls))
25
+
26
+ empty <- data.frame(S2_MPL, S2_PredictUseBonusEarnBin)
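+ # (tidyr::crossing() or expand.grid() would build the same grid)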
27
+
28
+ # replaces the non-missing
29
+ full <- empty %>%
30
+ left_join(counted, by= c('S2_MPL', 'S2_PredictUseBonusEarnBin')) %>%
31
+ mutate(Count=ifelse(is.na(Count), 0, Count))
32
+
33
+ #plots
34
+ a <- full %>%
35
+ ggplot(aes(S2_MPL, S2_PredictUseBonusEarnBin, fill= Count)) +
36
+ geom_tile() +
37
+ scale_fill_gradient(low = low_grey, high = maroon) +
38
+ theme_classic() +
39
+ labs(x= "Valuation of bonus ($)", y = "Predicted earnings from bonus ($)") +
40
+ geom_abline(intercept = 0, slope=1)
41
+
42
+ ggsave('output/heatmap_wtp_prediction.pdf', plot=a, width=6.5, height=4.5, units="in")
43
+ }
44
+
45
+ plot_predicted_actual <- function(df, period){
46
+ bin_size <- 20
47
+
48
+ # filter to just control
49
+ data <- df %>%
50
+ filter(B == 0 & L == 0)
51
+
52
+ #rename
53
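+ # !!sym() turns the constructed column name into a symbol for tidy evaluation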
+ data %<>% mutate(Predicted = !!sym(paste0('S', period, '_PredictUseNext_1'))) %>%
54
+ mutate(Actual = !!sym(paste0('PD_P', period, '_UsageFITSBY'))) %>%
55
+ filter(!is.na(Predicted) & !is.na(Actual))
56
+
57
+ counts <- data %>%
58
+ mutate(PredictedBin = Predicted - (Predicted %% bin_size) + (bin_size/2)) %>%
59
+ mutate(ActualBin = Actual - (Actual %% bin_size) + (bin_size/2)) %>%
60
+ select(PredictedBin, ActualBin) %>%
61
+ group_by(PredictedBin, ActualBin) %>%
62
+ count(name="Count")
63
+
64
+ #plots
65
+ a <- counts %>%
66
+ ggplot(aes(PredictedBin, ActualBin, fill= Count)) +
67
+ geom_tile() +
68
+ scale_fill_gradient(low = low_grey, high = maroon) +
69
+ theme_classic() +
70
+ labs(x= "Predicted FITSBY use (minutes/day)", y = "Actual FITSBY use (minutes/day)") +
71
+ geom_abline(intercept = 0, slope=1) +
72
+ xlim(0, 500) + ylim(0, 500)
73
+
74
+ ggsave(sprintf('output/heatmap_usage_P%s.pdf', period), plot=a, width=6.5, height=4.5, units="in")
75
+
76
+ }
77
+
78
+ plot_predicted_actual_all <- function(df){
79
+ bin_size <- 20
80
+
81
+ # filter to just control
82
+ data <- df %>%
83
+ filter(B == 0 & L == 0)
84
+
85
+ #rename
86
+ p2 <- data %>% mutate(Predicted = S2_PredictUseNext_1) %>%
87
+ mutate(Actual = PD_P2_UsageFITSBY) %>%
88
+ filter(!is.na(Predicted) & !is.na(Actual)) %>%
89
+ select(Predicted, Actual)
90
+
91
+ p3 <- data %>% mutate(Predicted = S3_PredictUseNext_1) %>%
92
+ mutate(Actual = PD_P3_UsageFITSBY) %>%
93
+ filter(!is.na(Predicted) & !is.na(Actual)) %>%
94
+ select(Predicted, Actual)
95
+
96
+ p4 <- data %>% mutate(Predicted = S4_PredictUseNext_1) %>%
97
+ mutate(Actual = PD_P4_UsageFITSBY) %>%
98
+ filter(!is.na(Predicted) & !is.na(Actual)) %>%
99
+ select(Predicted, Actual)
100
+
101
+ all_periods <- rbind(p2, p3, p4)
102
+
103
+ counts <- all_periods %>%
104
+ mutate(PredictedBin = Predicted - (Predicted %% bin_size) + (bin_size/2)) %>%
105
+ mutate(ActualBin = Actual - (Actual %% bin_size) + (bin_size/2)) %>%
106
+ select(PredictedBin, ActualBin) %>%
107
+ group_by(PredictedBin, ActualBin) %>%
108
+ count(name="Count")
109
+
110
+ #plots
111
+ a <- counts %>%
112
+ ggplot(aes(PredictedBin, ActualBin, fill= Count)) +
113
+ geom_tile() +
114
+ scale_fill_gradient(low = low_grey, high = maroon) +
115
+ theme_classic() +
116
+ labs(x= "Predicted FITSBY use (minutes/day)", y = "Actual FITSBY use (minutes/day)") +
117
+ geom_abline(intercept = 0, slope=1) +
118
+ xlim(0, 500) + ylim(0, 500)
119
+
120
+ ggsave('output/heatmap_usage.pdf', plot=a, width=6.5, height=4.5, units="in")
121
+
122
+ }
123
+
124
+ main <- function(){
125
+ df <- read_dta('input/final_data_sample.dta')
126
+
127
+ # clean data
128
+ df %<>%
129
+ mutate(L = ifelse(S2_LimitType != 0, 1, 0)) %>%
130
+ mutate(B = ifelse(S3_Bonus == 1, 1, 0)) %>%
131
+ mutate(S = as.character(Stratifier))
132
+
133
+ plot_wtp_prediction(df)
134
+
135
+ plot_predicted_actual(df, 2)
136
+ plot_predicted_actual(df, 3)
137
+ plot_predicted_actual(df, 4)
138
+
139
+ plot_predicted_actual_all(df)
140
+
141
+ }
142
+
143
+
144
+ main()
17/replication_package/code/analysis/descriptive/code/QualitativeEvidence.do ADDED
@@ -0,0 +1,152 @@
1
+ // Baseline qualitative evidence
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ end
18
+
19
+ program define_plot_settings
20
+ global HIST_SETTINGS ///
21
+ xlabel(, labsize(large)) ///
22
+ ylabel(, labsize(large)) ///
23
+ ytitle("Fraction of sample" " ", size(large)) ///
24
+ bcolor(maroon) graphregion(color(white)) ///
25
+ xsize(6.5) ysize(4.5)
26
+
27
+ global HIST_DISCRETE_SETTINGS ///
28
+ gap(50) xlabel(, valuelabel noticks) ///
29
+ $HIST_SETTINGS
30
+
31
+ global HIST_CONTINUOUS_SETTINGS ///
32
+ $HIST_SETTINGS
33
+
34
+ global CISPIKE_VERTICAL_GRAPHOPTS ///
35
+ ylabel(#6) ///
36
+ xsize(6.5) ysize(4.5)
37
+
38
+ global CISPIKE_SETTINGS ///
39
+ spikecolor(maroon black gray) ///
40
+ cicolor(maroon black gray)
41
+ end
42
+
43
+ **********************
44
+ * Analysis functions *
45
+ **********************
46
+
47
+ program main
48
+ define_constants
49
+ define_plot_settings
50
+ import_data
51
+
52
+ plot_self_control
53
+ plot_self_control_by_age
54
+ end
55
+
56
+ program import_data
57
+ use "input/final_data_sample.dta", clear
58
+ end
59
+
60
+ program plot_self_control
61
+ twoway hist S1_InterestInLimits, frac discrete ///
62
+ $HIST_DISCRETE_SETTINGS ///
63
+ xtitle(" " "Interest in limits", size(large))
64
+
65
+ graph export "output/hist_limits_interest.pdf", replace
66
+
67
+ twoway hist S1_PhoneUseChange, frac ///
68
+ $HIST_CONTINUOUS_SETTINGS ///
69
+ width(5) start(-102.5) ///
70
+ xtitle(" " "Ideal use change (percent)", size(large))
71
+
72
+ graph export "output/hist_phone_use.pdf", replace
73
+
74
+ twoway hist S1_LifeBetter, frac discrete ///
75
+ $HIST_CONTINUOUS_SETTINGS ///
76
+ xtitle(" " "Phone use makes life worse (left) or better (right)", size(large)) ///
77
+ xtick(-5(2.5)5) xlabel(-5(5)5)
78
+
79
+ graph export "output/hist_life_betterworse.pdf", replace
80
+
81
+ hist S1_AddictionIndex, frac ///
82
+ $HIST_CONTINUOUS_SETTINGS ///
83
+ xtitle(" " "Addiction scale", size(large))
84
+
85
+ graph export "output/hist_addiction_index.pdf", replace
86
+
87
+
88
+ hist S1_SMSIndex, frac ///
89
+ $HIST_CONTINUOUS_SETTINGS ///
90
+ xtitle(" " "SMS addiction scale", size(large))
91
+
92
+ graph export "output/hist_sms_index.pdf", replace
93
+
94
+ end
95
+
96
+ program plot_self_control_by_age
97
+ * Preserve data
98
+ preserve
99
+
100
+ * Reshape data
101
+ keep UserID AgeGroup PD_P1_UsageFITSBY Strat*Index
102
+ rename_but, varlist(UserID AgeGroup) prefix(index)
103
+ reshape long index, i(UserID AgeGroup) j(measure) string
104
+
105
+ * Recode data
106
+ encode measure, generate(measure_encode)
107
+
108
+ recode measure_encode ///
109
+ (2 = 1 "Addiction index") ///
110
+ (3 = 2 "Restriction index") ///
111
+ (1 = 3 "Period 1 FITSBY Usage"), ///
112
+ gen(measure_recode)
113
+
114
+ * Define plot settings
115
+
116
+ // - When creating multiple y-axis plots, Stata unfortunately makes no
117
+ // attempt to align the different y-axes
118
+ // - Manually adjust the following options to properly align the y-axes
119
+ // - Note that values for legend order are also manually specified
120
+ // (but do not need to be adjusted) as including multiple y-axes jumbles
121
+ // the legend order expected by the cispike command
122
+ local ylabel1 -.4(.2).6
123
+ local ylabel2 100(10)200
124
+ local yrange2 range(100, 200)
125
+
126
+ * Plot data
127
+
128
+ cispike index, ///
129
+ over1(measure_recode) over2(AgeGroup) ///
130
+ $CISPIKE_SETTINGS ///
131
+ spike( yaxis(1) || yaxis(1) || yaxis(2)) ci( yaxis(1) || yaxis(1) || yaxis(2)) ///
132
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
133
+ ytitle("Standard deviations" " ", axis(1)) ///
134
+ ytitle(" " "Usage (minutes/day)", axis(2)) ///
135
+ ylabel(`ylabel1', axis(1)) ///
136
+ ylabel(`ylabel2', axis(2)) ///
137
+ yscale(`yrange2' axis(2)) ///
138
+ legend(order(11 "Addiction index" ///
139
+ 16 "Restriction index" ///
140
+ 26 "Period 1 FITSBY Usage")))
141
+
142
+ graph export "output/cispike_self_control_index_by_age.pdf", replace
143
+
144
+ * Restore data
145
+ restore
146
+ end
147
+
148
+ ***********
149
+ * Execute *
150
+ ***********
151
+
152
+ main
17/replication_package/code/analysis/descriptive/code/SampleStatistics.do ADDED
@@ -0,0 +1,138 @@
1
+ // Sample statistics
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ end
18
+
19
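+ // Note: scalars.tex is opened in append mode; this assumes the build
+ // system clears output/ between runs so macros are not duplicated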
+ program latex
20
+ syntax, name(str) value(str)
21
+
22
+ local command = "\newcommand{\\`name'}{`value'}"
23
+
24
+ file open scalars using "output/scalars.tex", write append
25
+ file write scalars `"`command'"' _n
26
+ file close scalars
27
+ end
28
+
29
+ program latex_integer
30
+ syntax, name(str) value(str)
31
+
32
+ local value : display %8.0gc `value'
33
+ local value = trim("`value'")
34
+
35
+ latex, name(`name') value(`value')
36
+ end
37
+
38
+ **********************
39
+ * Analysis functions *
40
+ **********************
41
+
42
+ program main
43
+ define_constants
44
+ import_data
45
+
46
+ get_samples
47
+ end
48
+
49
+ program import_data
50
+ use "input/final_data.dta", clear
51
+ end
52
+
53
+ program get_samples
54
+ cap sencode UserID, replace
55
+
56
+ * Shown ad
57
+ latex, name(shownad) value("3,271,165")
58
+
59
+ * Clicked on ad
60
+ sum UserID if S0_Finished != .
61
+ latex_integer, name(clickedonad) value(`r(N)')
62
+
63
+ * Passed pre-screen
64
+ sum UserID if S0_Android == 1 & S0_Country == 1 & S0_Age >= 18 & S0_Age < 65 & ///
65
+ S0_PhoneCount == 1 & S0_Android == 1
66
+ latex_integer, name(passedprescreen) value(`r(N)')
67
+
68
+ * Consented
69
+ sum UserID if S0_Consent == 1
70
+ latex_integer, name(consented) value(`r(N)')
71
+
72
+ * Finished intake
73
+ sum UserID if S0_Finished == 1 & S0_Consent == 1
74
+ latex_integer, name(finishedintake) value(`r(N)')
75
+
76
+ * Began baseline
77
+ sum UserID if S1_Finished != .
78
+ latex_integer, name(beganbaseline) value(`r(N)')
79
+
80
+ * Finished baseline
81
+ sum UserID if S1_Finished == 1
82
+ latex_integer, name(finishedbaseline) value(`r(N)')
83
+ local finishedbaseline `r(N)'
84
+
85
+ * Randomized
86
+ sum UserID if S1_Finished == 1 & Randomize == 1
87
+ latex_integer, name(randomized) value(`r(N)')
88
+ local randomized `r(N)'
89
+
90
+ * Dropped from baseline
91
+ local dropped = `finishedbaseline' - `randomized'
92
+ latex_integer, name(droppedbaseline) value(`dropped')
93
+
94
+ * Began midline
95
+ sum UserID if S2_Finished != .
96
+ latex_integer, name(beganmidline) value(`r(N)')
97
+
98
+ * Informed of treatment
99
+ sum UserID if S2_RevealConfirm == 1
100
+ latex_integer, name(informedtreat) value(`r(N)')
101
+
102
+ * Finished midline
103
+ sum UserID if S2_Finished == 1 & S2_RevealConfirm == 1
104
+ latex_integer, name(finishedmidline) value(`r(N)')
105
+
106
+ * Began endline
107
+ sum UserID if S3_Finished != .
108
+ latex_integer, name(beganendline) value(`r(N)')
109
+
110
+ * Finished endline
111
+ sum UserID if S3_Finished == 1
112
+ latex_integer, name(finishedendline) value(`r(N)')
113
+
114
+ * Began post-endline
115
+ sum UserID if S4_Finished != .
116
+ latex_integer, name(beganpostendline) value(`r(N)')
117
+
118
+ * Finished post-endline
119
+ sum UserID if S4_Finished == 1
120
+ latex_integer, name(finishedpostendline) value(`r(N)')
121
+
122
+ sum UserID if S4_Finished == 1 & PD_P5_Usage != .
123
+ latex_integer, name(kepttoend) value(`r(N)')
124
+
125
+ * Analytical sizes
126
+ sum UserID if S2_RevealConfirm == 1 & S3_Bonus <= 1
127
+ latex_integer, name(informedtreatanalysis) value(`r(N)')
128
+
129
+ sum UserID if S2_RevealConfirm == 1 & S3_Bonus <= 1 & PD_P5_Usage != . & S4_Finished == 1
130
+ latex_integer, name(kepttoendanalysis) value(`r(N)')
131
+
132
+ end
133
+
134
+ ***********
135
+ * Execute *
136
+ ***********
137
+
138
+ main
17/replication_package/code/analysis/descriptive/code/Scalars.do ADDED
@@ -0,0 +1,625 @@
1
+ // Ad hoc scalars for text of main paper
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ yaml global STRATA = YAML.metadata.strata
18
+ end
19
+
20
+ program latex
21
+ syntax, name(str) value(str)
22
+
23
+ local command = "\newcommand{\\`name'}{`value'}"
24
+
25
+ file open scalars using "output/scalars.tex", write append
26
+ file write scalars `"`command'"' _n
27
+ file close scalars
28
+ end
29
+
30
+ program latex_rounded
31
+ syntax, name(str) value(str) digits(str)
32
+
33
+ local value : display %8.`digits'fc `value'
34
+ local value = trim("`value'")
35
+
36
+ latex, name(`name') value(`value')
37
+ end
38
+
39
+ program latex_precision
40
+ syntax, name(str) value(str) digits(str)
41
+
42
+ autofmt, input(`value') dec(`digits') strict
43
+ local value = r(output1)
44
+
45
+ latex, name(`name') value(`value')
46
+ end
47
+
48
+ program reshape_swb
49
+ * Reshape wide to long
50
+ keep UserID S3_Bonus S2_LimitType Stratifier S*_SWBIndex_N
51
+
52
+ local indep UserID S3_Bonus S2_LimitType Stratifier S1_*
53
+ rename_but, varlist(`indep') prefix(outcome)
54
+ reshape long outcome, i(`indep') j(measure) string
55
+
56
+ split measure, p(_)
57
+ replace measure = measure2 + "_" + measure3
58
+ rename measure1 survey
59
+ drop measure2 measure3
60
+
61
+ * Reshape long to wide
62
+ reshape wide outcome, i(UserID survey) j(measure) string
63
+ rename outcome* *
64
+
65
+ * Recode data
66
+ encode survey, gen(S)
67
+
68
+ * Label data
69
+ label var SWBIndex "Subjective well-being"
70
+ end
71
+
72
+ **********************
73
+ * Analysis functions *
74
+ **********************
75
+
76
+ program main
77
+ define_constants
78
+ import_sample_data
79
+
80
+ get_usage_info_open
81
+ get_percent_fitsby
82
+ get_percent_limit
83
+ get_ideal_use
84
+ get_life_worse
85
+ get_addict
86
+ get_bonus_effect
87
+ get_limit_effect
88
+ get_valuations
89
+ get_baseline_usage
90
+ get_compare2019
91
+ get_substitution
92
+ get_swb_pvalues
93
+ get_bonus_desire
94
+ get_pd_usage
95
+ get_medians
96
+ get_bound_use
97
+
98
+
99
+ * import_data
100
+ * get_other_blocker_use
101
+
102
+ end
103
+
104
+ program import_sample_data
105
+ use "input/final_data_sample.dta", clear
106
+ end
107
+
108
+ program import_data
109
+ use "input/final_data.dta", clear
110
+ end
111
+
112
+ program tab_percent
113
+ syntax, var(str) key(str) name(str) digits(str)
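+ // Writes the percent of observations with `var' in the value list `key'
+ // to scalars.tex as macro `name'; missings count in the denominator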
114
+
115
+ * Generate dummy
116
+ cap drop dummy
117
+ gen dummy = 0
118
+ replace dummy = 1 if inlist(`var', `key')
119
+
120
+ * Tabulate dummy
121
+ sum dummy
122
+ local perc = `r(mean)' * 100
123
+ latex_rounded, name(`name') value(`perc') digits(`digits')
124
+ end
125
+
126
+ program get_usage_info_open
127
+ latex, name(usageinfoopen) value("XXX") // WIP
128
+ end
129
+
130
+ program get_percent_fitsby
131
+ * Preserve data
132
+ preserve
133
+
134
+ * Reshape data
135
+ keep UserID PD_*_Usage_* PD_*_Installed_*
136
+ keep UserID *Facebook *Instagram *Twitter *Snapchat *Browser *YouTube
137
+ rename_but, varlist(UserID) prefix(use)
138
+ reshape long use, i(UserID) j(j) string
139
+
140
+ split j, p(_)
141
+ rename j4 app
142
+
143
+ * Get apps used
144
+ collapse (sum) use, by(UserID app)
145
+ replace use = 1 if use > 0 & use != .
146
+
147
+ * Get number of apps used
148
+ collapse (sum) use, by(UserID)
149
+
150
+ * Get percent all apps used
151
+ tab_percent, ///
152
+ var(use) key(6) ///
153
+ name(percentfitsby) digits(1)
154
+
155
+ * Restore data
156
+ restore
157
+ end
158
+
159
+ program get_percent_limit
160
+ * Get percent moderately or very interested
161
+ tab_percent, ///
162
+ var(S1_InterestInLimits) key(3, 4) ///
163
+ name(percentlimitinterested) digits(0)
164
+
165
+ * Get percent not at all interested
166
+ tab_percent, ///
167
+ var(S1_InterestInLimits) key(1) ///
168
+ name(percentlimitnot) digits(0)
169
+ end
170
+
171
+ program get_ideal_use
172
+ * Get percent just right
173
+ tab_percent, ///
174
+ var(S1_PhoneUseFeel) key(2) ///
175
+ name(percentuseright) digits(0)
176
+
177
+ * Get percent too little
178
+ tab_percent, ///
179
+ var(S1_PhoneUseFeel) key(3) ///
180
+ name(percentuselittle) digits(1)
181
+
182
+ * Get mean total ideal reduction
183
+ sum S1_PhoneUseReduce
184
+ local mean = r(mean)
185
+ latex_rounded, name(idealreduction) value(`mean') digits(0)
186
+
187
+ * Get mean ideal reduction by app: recode survey bins to signed midpoints,
+ * then flip the sign so positive values denote desired reductions
+ local tag_Facebook facebook
+ local tag_Instagram insta
+ local tag_Twitter twitter
+ local tag_Snapchat snap
+ local tag_Browser browser
+ local tag_YouTube youtube
+
+ foreach app in Facebook Instagram Twitter Snapchat Browser YouTube {
+ recode S1_IdealApp_`app' ///
+ (1 = -75 ) ///
+ (2 = -37.5) ///
+ (3 = -12.5) ///
+ (4 = 0 ) ///
+ (5 = 12.5) ///
+ (6 = 37.5) ///
+ (7 = 75 ) ///
+ (8 = 0 ), ///
+ gen(S1_IdealApp_`app'_recode)
+
+ sum S1_IdealApp_`app'_recode
+ local mean = r(mean) * -1
+ latex_rounded, name(idealreduction`tag_`app'') value(`mean') digits(0)
+ }
282
+
283
+ end
284
+
285
+ program get_life_worse
286
+ * Get percent life worse
287
+ tab_percent, ///
288
+ var(S1_LifeBetter) key(-5, -4, -3, -2, -1) ///
289
+ name(percentlifeworse) digits(0)
290
+ end
291
+
292
+ program get_addict
293
+ * Get mean addiction index
294
+ sum S1_AddictionIndex
295
+ local mean = r(mean) * -1
296
+ latex_rounded, name(scaleaddict) value(`mean') digits(1)
297
+ end
298
+
299
+ program get_bonus_effect
300
+ preserve
301
+ local baseline PD_P1_UsageFITSBY
302
+ local yvar PD_P2_UsageFITSBY
303
+ gen_treatment, simple
304
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
305
+ local treatment = -_b[B]
306
+ latex_precision, name(bonustwo) value(`treatment') digits(2)
307
+
308
+ local baseline PD_P1_UsageFITSBY
309
+ local yvar PD_P3_UsageFITSBY
310
+ gen_treatment, simple
311
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
312
+ local treatment = -_b[B]
313
+ latex_precision, name(bonusthree) value(`treatment') digits(2)
314
+
315
+ sum PD_P3_UsageFITSBY if B == 0 & L == 0
316
+ local reduction = (`treatment'/r(mean))*100
317
+ latex_precision, name(bonusthreepct) value(`reduction') digits(2)
318
+
319
+ local baseline PD_P1_UsageFITSBY
320
+ local yvar PD_P4_UsageFITSBY
321
+ gen_treatment, simple
322
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
323
+ local treatment4 = -_b[B]
324
+ latex_precision, name(bonusfour) value(`treatment4') digits(2)
325
+
326
+ local baseline PD_P1_UsageFITSBY
327
+ local yvar PD_P5_UsageFITSBY
328
+ gen_treatment, simple
329
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
330
+ local treatment5 = -_b[B]
331
+ latex_precision, name(bonusfive) value(`treatment5') digits(2)
332
+ restore
333
+ end
334
+
335
+ program get_limit_effect
336
+ preserve
337
+
338
+ local baseline PD_P1_UsageFITSBY
339
+ local yvar PD_P5432_UsageFITSBY
340
+ gen_treatment, simple
341
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
342
+ local treatment = -_b[L]
343
+ latex_precision, name(limiteffectstataadhoc) value(`treatment') digits(2)
344
+
345
+ sum PD_P5432_UsageFITSBY if B == 0 & L == 0
346
+ local reduction = (`treatment'/r(mean))*100
347
+
348
+ latex_precision, name(limiteffectpct) value(`reduction') digits(2)
349
+ restore
350
+ end
351
+
352
+ program get_valuations
353
+ preserve
354
+
355
+ sum S2_MPL
356
+ local vb = r(mean)
357
+ latex_precision, name(valuebonus) value(`vb') digits(2)
358
+
359
+ sum S3_MPLLimit
360
+ local vl = r(mean)
361
+ local numlimit = r(N)
362
+ latex_precision, name(valuelimit) value(`vl') digits(3)
363
+
364
+
365
+ sum S3_MPLLimit if S3_MPLLimit > 0
366
+ local numpaylimit = r(N)
367
+ local positivelimit = (`numpaylimit' / `numlimit') * 100
368
+ latex_precision, name(positivelimit) value(`positivelimit') digits(2)
369
+
370
+ sum S3_MPLLimit if S3_MPLLimit > 10
371
+ local numpayten = r(N)
372
+ local tenlimit = (`numpayten' / `numlimit') * 100
373
+ latex_precision, name(tenlimit) value(`tenlimit') digits(2)
374
+
375
+ restore
376
+ end
377
+
378
+ program get_baseline_usage
379
+ preserve
380
+
381
+ sum PD_P1_Usage
382
+ local avg_all = r(mean)
383
+ latex_precision, name(avgOverall) value(`avg_all') digits(2)
384
+
385
+ sum PD_P1_UsageFITSBY
386
+ local avg_fitsby = r(mean)
387
+ latex_precision, name(avgFITSBY) value(`avg_fitsby') digits(2)
388
+
389
+ local avg_fitsby_pct = (`avg_fitsby' / `avg_all') * 100
390
+ latex_precision, name(avgFITSBYpct) value(`avg_fitsby_pct') digits(2)
391
+
392
+ sum PD_P1_Usage_Facebook
393
+ local avg_fb = r(mean)
394
+ latex_precision, name(avgFB) value(`avg_fb') digits(2)
395
+
396
+ sum PD_P1_Usage_Browser
397
+ local avg_br = r(mean)
398
+ latex_precision, name(avgBR) value(`avg_br') digits(2)
399
+
400
+ sum PD_P1_Usage_YouTube
401
+ local avg_yt = r(mean)
402
+ latex_precision, name(avgYT) value(`avg_yt') digits(2)
403
+
404
+ sum PD_P1_Usage_Instagram
405
+ local avg_in = r(mean)
406
+ latex_precision, name(avgIN) value(`avg_in') digits(2)
407
+
408
+ sum PD_P1_Usage_Snapchat
409
+ local avg_sc = r(mean)
410
+ latex_precision, name(avgSC) value(`avg_sc') digits(2)
411
+
412
+ sum PD_P1_Usage_Twitter
413
+ local avg_tw = r(mean)
414
+ latex_precision, name(avgTW) value(`avg_tw') digits(2)
415
+
416
+ restore
417
+ end
418
+
419
+ program get_compare2019
420
+ preserve
421
+
422
+ sum S1_CovidChangesFreeTime
423
+ local ss = r(N)
424
+
425
+ sum S1_CovidChangesFreeTime if S1_CovidChangesFreeTime > 4
426
+ local num_worse = r(N)
427
+
428
+ local covidfree = 100 * `num_worse'/`ss'
429
+ latex_precision, name(covidmorefree) value(`covidfree') digits(2)
430
+
431
+ recode S1_CovidChangeReason ///
432
+ (1 = 4 "Increased phone usage") ///
433
+ (2 = 4 "Increased phone usage") ///
434
+ (3 = 3 "No change") ///
435
+ (4 = 4 "Increased phone usage") ///
436
+ (5 = 2 "Decreased phone usage") ///
437
+ (6 = 1 "Other"), ///
438
+ gen(S1_CovidChangeReason_recode)
439
+
440
+ sum S1_CovidChangesFreeTime
441
+ local ss2 = r(N)
442
+
443
+ sum S1_CovidChangeReason_recode if S1_CovidChangeReason_recode == 4
444
+ local num_more_phone = r(N)
445
+
446
+ local morephoneuse = 100 * `num_more_phone'/`ss2'
447
+ latex_precision, name(morephoneuse) value(`morephoneuse') digits(2)
448
+
449
+ restore
450
+ end
451
+
452
+ program get_substitution
453
+ preserve
454
+
455
+ gen_treatment, simple
456
+ reg_treatment, yvar(S4_Substitution_W) indep($STRATA) simple
457
+
458
+ local bsub = -_b[B]
459
+ latex_precision, name(bonussubstitution) value(`bsub') digits(2)
460
+
461
+ local lsub = _b[L]
462
+ latex_precision, name(limitsubstitution) value(`lsub') digits(2)
463
+
464
+ gen avg_overall = (PD_P3_Usage + PD_P4_Usage + PD_P5_Usage)/3
465
+ gen avg_fitsby = (PD_P3_UsageFITSBY + PD_P4_UsageFITSBY + PD_P5_UsageFITSBY)/3
466
+
467
+ gen avg_non_fitsby = avg_overall - avg_fitsby
468
+ reg_treatment, yvar(avg_non_fitsby) indep($STRATA) simple
469
+
470
+ local bsub = -_b[B]
471
+ latex_precision, name(bonusnonfitsby) value(`bsub') digits(2)
472
+
473
+ local lsub = _b[L]
474
+ latex_precision, name(limitnonfitsby) value(`lsub') digits(2)
475
+ restore
476
+ end
477
+
478
+ program get_swb_pvalues
479
+ est clear
480
+
481
+ * Preserve data
482
+ preserve
483
+
484
+ * Reshape data
485
+ reshape_swb
486
+
487
+ * Specify regression
488
+ local yvar SWBIndex_N
489
+
490
+ * Run regressions
491
+ local baseline = "S1_`yvar'"
492
+
493
+ * Treatment indicators
494
+ gen_treatment, simple
495
+ cap drop B3
496
+ cap drop B4
497
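+ * Interact the Bonus indicator with the reshaped survey index S (B3: S == 1, B4: S == 2)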
+ gen B3 = B * (S == 1)
498
+ gen B4 = B * (S == 2)
499
+
500
+ * Specify regression
501
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
502
+
503
+ reg `yvar' L B3 B4 `indep', robust cluster(UserID)
504
+
505
+ * Stata exposes _b[] and _se[] after -regress-, but no _P[]; compute p-values from the t-statistics
+ local lprob = 2 * ttail(e(df_r), abs(_b[L] / _se[L]))
506
+ local lcoef = _b[L]
507
+ latex_precision, name(limitSWBpval) value(`lprob') digits(2)
508
+ latex_precision, name(limitSWBcoef) value(`lcoef') digits(1)
509
+
510
+ local bprob = 2 * ttail(e(df_r), abs(_b[B4] / _se[B4]))
511
+ local bcoef = _b[B4]
512
+ latex_precision, name(bonusSWBpval) value(`bprob') digits(2)
513
+ latex_precision, name(bonusSWBcoef) value(`bcoef') digits(1)
514
+
515
+ * Restore data
516
+ restore
517
+ end
518
+
519
+ program get_bonus_desire
520
+ sum S2_PredictUseInitial_W
521
+ local avg_prediction = r(mean)
522
+ latex_precision, name(MPLprediction) value(`avg_prediction') digits(2)
523
+
524
+ sum S2_PredictUseBonus
525
+ local avg_reduction_pct = r(mean)
526
+ latex_precision, name(MPLreductionpct) value(`avg_reduction_pct') digits(2)
527
+
528
+ gen reduction = S2_PredictUseInitial_W * (S2_PredictUseBonus / 100)
529
+ sum reduction
530
+ local avg_reduction_mins = r(mean)
531
+ latex_precision, name(MPLreductionmins) value(`avg_reduction_mins') digits(2)
532
+
533
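+ * Convert the predicted reduction (minutes/day) into implied Bonus earnings at $50 per hour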
+ gen value = (reduction/60)*50
534
+ sum value
535
+ local avg_bonus_earnings = r(mean)
536
+ latex_precision, name(MPLearnings) value(`avg_bonus_earnings') digits(2)
537
+
538
+ sum S2_MPL
539
+ local avg_value_bonus = r(mean)
540
+ latex_precision, name(MPLvalue) value(`avg_value_bonus') digits(2)
541
+
542
+ gen premium = S2_MPL - value
543
+ sum premium
544
+ local avg_premium = r(mean)
545
+ latex_precision, name(MPLpremium) value(`avg_premium') digits(2)
546
+
547
+ sum S2_MPLReasoning
548
+ local total_respondents = r(N)
549
+
550
+ sum S2_MPLReasoning if S2_MPLReasoning == 2
551
+ local wish_reduce = r(N)
552
+ local wish_reduce_pct = (`wish_reduce' / `total_respondents') * 100
553
+ latex_precision, name(MPLwishreduce) value(`wish_reduce_pct') digits(2)
554
+
555
+ sum S2_MPLReasoning if S2_MPLReasoning == 1
556
+ local maximize = r(N)
557
+ local maximize_pct = (`maximize' / `total_respondents') * 100
558
+ latex_precision, name(MPLmaximize) value(`maximize_pct') digits(2)
559
+
560
+ sum S2_MPLReasoning if S2_MPLReasoning == 3
561
+ local no_pressure = r(N)
562
+ local no_pressure_pct = (`no_pressure' / `total_respondents') * 100
563
+ latex_precision, name(MPLnopressure) value(`no_pressure_pct') digits(2)
564
+
565
+ sum premium if S2_MPLReasoning == 2
566
+ local premium_reduce = r(mean)
567
+
568
+ sum premium if S2_MPLReasoning == 3
569
+ local premium_no_pressure = r(mean)
570
+
571
+ local premium_difference = `premium_reduce' - `premium_no_pressure'
572
+ latex_precision, name(MPLpremiumdifference) value(`premium_difference') digits(2)
573
+ end
574
+
575
+ program get_pd_usage
576
+ gen_treatment, simple
577
+
578
+ sum PD_P5432_UsageMinutesPD if B == 1
579
+ local mins_bonus = r(mean) / 84
580
+ latex_precision, name(BonusPDmins) value(`mins_bonus') digits(2)
581
+
582
+ sum PD_P5432_UsageMinutesPD if L == 1
583
+ local mins_limit = r(mean) / 84
584
+ latex_precision, name(LimitPDmins) value(`mins_limit') digits(2)
585
+
586
+ sum PD_P5432_UsageMinutesPD if B == 0 & L == 0
587
+ local mins_control = r(mean) / 84
588
+ latex_precision, name(ControlPDmins) value(`mins_control') digits(1)
589
+
590
+ end
591
+
592
+ program get_medians
593
+ sum S0_Age, detail
594
+ local med_age = r(p50)
595
+ latex_precision, name(MedianAge) value(`med_age') digits(2)
596
+
597
+ sum PD_P1_UsageFITSBY, detail
598
+ local med_use = r(p50)
599
+ latex_precision, name(MedianFITSBYUsage) value(`med_use') digits(2)
600
+ end
601
+
602
+ program get_bound_use
603
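+ * Round baseline FITSBY usage up to the next full hour before comparing to period-3 usage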
+ gen baseline = ceil(PD_P1_UsageFITSBY/60)*60
604
+ gen exceeds = PD_P3_UsageFITSBY > baseline
605
+ sum exceeds if S3_Bonus == 1
606
+ local pct_exceed = r(mean) * 100
607
+ latex_precision, name(PercentExceedBonus) value(`pct_exceed') digits(2)
608
+
609
+ gen huge_drop = PD_P3_UsageFITSBY < (baseline - 180)
610
+ sum huge_drop if S3_Bonus == 1
611
+ local pct_huge_drop = r(mean) * 100
612
+ latex_rounded, name(PercentBoundDrop) value(`pct_huge_drop') digits(0)
613
+ end
614
+
615
+ program get_other_blocker_use
616
+ sum S1_OtherLimitUse if S1_Finished == 1
617
+ local pct_other_blocker_use = r(mean) * 100
618
+ latex_rounded, name(OtherBlockerUse) value(`pct_other_blocker_use') digits(0)
619
+ end
620
+
621
+ ***********
622
+ * Execute *
623
+ ***********
624
+
625
+ main
17/replication_package/code/analysis/descriptive/code/Temptation.do ADDED
@@ -0,0 +1,100 @@
1
+ // Figure 1
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ yaml global STRATA = YAML.metadata.strata
18
+
19
+ global app_list Facebook Instagram Twitter Snapchat Browser YouTube Other
20
+ end
21
+
22
+
23
+
24
+ **********************
25
+ * Analysis functions *
26
+ **********************
27
+
28
+ program main
29
+ define_constants
30
+ import_data
31
+ plot_figure_1
32
+ end
33
+
34
+ program import_data
35
+ use "input/final_data_sample.dta", clear
36
+ end
37
+
38
+
39
+ program plot_figure_1
40
+ * Preserve data
41
+ preserve
42
+
43
+ * Drop unnecessary columns
44
+ keep UserID S4_Temptation_*
45
+ * Pivot the columns into a new variable
46
+ reshape long S4_ , i(UserID) j(control) string
47
+ * Recode responses: too little (-1), the right amount (0), too much (1)
48
+ recode S4_ (1 = -1) (2 = 0) (3 = 1), gen(S4_N)
49
+
50
+ * Relabel
51
+ replace control="Exercise" if control=="Temptation_1"
52
+ replace control="{bf:Use smartphone" if control=="Temptation_2"
53
+ replace control="Eat unhealthy food" if control=="Temptation_3"
54
+ replace control="{bf:Check email" if control=="Temptation_4"
55
+ replace control="{bf:Play video games" if control=="Temptation_5"
56
+ replace control="Watch TV" if control=="Temptation_6"
57
+ replace control="Work" if control=="Temptation_7"
58
+ replace control="{bf:Browse social media" if control=="Temptation_8"
59
+ replace control="Smoke cigarettes" if control=="Temptation_9"
60
+ replace control="{bf:Read online news" if control=="Temptation_10"
61
+ replace control="Drink alcohol" if control=="Temptation_11"
62
+ replace control="Sleep" if control=="Temptation_12"
63
+ replace control="Save for retirement" if control=="Temptation_13"
64
+
65
+ * Collapse to values of interest
66
+ drop UserID
67
+ collapse (mean) S4_m = S4_N (semean) S4_se=S4_N (count) S4_count = S4_N, by(control)
68
+ * Append "(-1)" to labels with negative means, close the {bf:...} tags, then take absolute values and sort
69
+ replace control=control+" (-1)" if S4_m<0
70
+ replace control=control+"}" if strpos(control,"bf")>0
71
+
72
+ replace S4_m=abs(S4_m)
73
+ gsort -S4_m
74
+
75
+ * Create 95% CI bands
76
+ gen S4_m_lb = S4_m - 1.96*S4_se
77
+ gen S4_m_ub = S4_m + 1.96*S4_se
78
+
79
+ * Plot
80
+ gen axis = _n
81
+ labmask axis, val(control)
82
+
83
+ twoway (rcap S4_m_lb S4_m_ub axis, lcolor(maroon)) (scatter S4_m axis, msize(small)), ///
84
+ xlabel(1(1)13,valuelabel angle(45) labsize(small)) ///
85
+ ytitle("absolute value of" "(share “too much” – share “too little”)", ///
86
+ size(small)) xtitle("{&larr} more perceived self-control problems | less perceived self-control problems {&rarr}") legend(off) graphregion(color(white))
87
+
88
+ graph export "output/online_and_offline_temptation_scatter.pdf", replace
89
+
90
+ * Restore data
91
+ restore
92
+
93
+
94
+ end
95
+
96
+ ***********
97
+ * Execute *
98
+ ***********
99
+
100
+ main
17/replication_package/code/analysis/descriptive/input.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aacb5e3a47846afaf251dbe069f5cee136e1255fe4ec43721a033fafc1d837d
3
+ size 812
17/replication_package/code/analysis/descriptive/make.py ADDED
@@ -0,0 +1,75 @@
1
+ ###################
2
+ ### ENVIRONMENT ###
3
+ ###################
4
+ import git
5
+ import imp
6
+ import os
7
+
8
+ ### SET DEFAULT PATHS
9
+ ROOT = '../..'
10
+
11
+ PATHS = {
12
+ 'root' : ROOT,
13
+ 'lib' : os.path.join(ROOT, 'lib'),
14
+ 'config' : os.path.join(ROOT, 'config.yaml'),
15
+ 'config_user' : os.path.join(ROOT, 'config_user.yaml'),
16
+ 'input_dir' : 'input',
17
+ 'external_dir' : 'external',
18
+ 'output_dir' : 'output',
19
+ 'output_local_dir' : 'output_local',
20
+ 'makelog' : 'log/make.log',
21
+ 'output_statslog' : 'log/output_stats.log',
22
+ 'source_maplog' : 'log/source_map.log',
23
+ 'source_statslog' : 'log/source_stats.log',
24
+ }
25
+
26
+ ### LOAD GSLAB MAKE
27
+ f, path, desc = imp.find_module('gslab_make', [PATHS['lib']])
28
+ gs = imp.load_module('gslab_make', f, path, desc)
29
+
30
+ ### LOAD CONFIG USER
31
+ PATHS = gs.update_paths(PATHS)
32
+ gs.update_executables(PATHS)
33
+
34
+ ############
35
+ ### MAKE ###
36
+ ############
37
+
38
+ ### START MAKE
39
+ gs.remove_dir(['input', 'external'])
40
+ gs.clear_dir(['output', 'log', 'temp'])
41
+ gs.start_makelog(PATHS)
42
+
43
+ ### GET INPUT FILES
44
+ inputs = gs.link_inputs(PATHS, ['input.txt'])
45
+ # gs.write_source_logs(PATHS, inputs + externals)
46
+ # gs.get_modified_sources(PATHS, inputs + externals)
47
+
48
+ ### RUN SCRIPTS
49
+ """
50
+ Critical
51
+ --------
52
+ Many of the Stata analysis scripts recode variables using
53
+ the `recode` command. Double-check all `recode` commands
54
+ to confirm recoding is correct, especially when reusing
55
+ code for a different experiment version.
56
+ """
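+ # Optional audit aid (an addition, not part of the original pipeline):
+ # print every `recode` command in this module's Stata scripts so the
+ # recodings can be reviewed by hand before the scripts are run.
+ import glob
+ for do_file in glob.glob('code/*.do'):
+     with open(do_file) as do_script:
+         for lineno, line in enumerate(do_script, start=1):
+             if line.lstrip().startswith('recode '):
+                 print('{}:{}: {}'.format(do_file, lineno, line.strip()))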
57
+
58
+ gs.run_stata(PATHS, program = 'code/Scalars.do')
59
+ #gs.run_stata(PATHS, program = 'code/SampleStatistics.do')
60
+ gs.run_stata(PATHS, program = 'code/DataDescriptives.do')
61
+ gs.run_stata(PATHS, program = 'code/QualitativeEvidence.do')
62
+ gs.run_stata(PATHS, program = 'code/CommitmentDemand.do')
63
+ gs.run_stata(PATHS, program = 'code/COVIDResponse.do')
64
+ gs.run_stata(PATHS, program = 'code/Temptation.do')
65
+
66
+ gs.run_r(PATHS, program = 'code/HeatmapPlots.R')
67
+
68
+ ### LOG OUTPUTS
69
+ gs.log_files_in_output(PATHS)
70
+
71
+ ### CHECK FILE SIZES
72
+ #gs.check_module_size(PATHS)
73
+
74
+ ### END MAKE
75
+ gs.end_makelog(PATHS)
17/replication_package/code/analysis/structural/.RData ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7292ca6438e8b7e80608a5015f136523305b10e1497fa96808904f0c51ab72cd
3
+ size 3785816
17/replication_package/code/analysis/structural/.Rhistory ADDED
@@ -0,0 +1,512 @@
1
+ install.packages("optimx")
2
+ library("optimx")
3
+ library("stats")
4
+ library("tidyverse")
5
+ square_function <- function(x){
6
+ y = x^2
7
+ return(y)
8
+ }
9
+ square_function(x=3)
10
+ square_function(x=0)
11
+ intial_values <- c(-2,1,2)
12
+ minimise_function <- optimr(intial_values, square_function)
13
+ intial_values <- c(-2)
14
+ minimise_function <- optimr(intial_values, square_function)
15
+ minimise_function <- optimr(intial_values, square_function, method = "Brent")
16
+ minimise_function <- optimr(intial_values, square_function)
17
+ minimise_function <- optimr(par = intial_values, fn=square_function, method = "Brent")
18
+ install.packages("Brent")
19
+ minimise_function <- optimize(f=square_function, lower = -10, upper=10)
20
+ minimise_function$par
21
+ minimise_function
22
+ minimise_function <- optimize(f=square_function, lower = -10000000, upper=100000000)
23
+ minimise_function
24
+ cube_function <- function(x){
25
+ y = x^3
26
+ return(y)
27
+ }
28
+ minimise_function <- optimize(f=cube_function, lower = -10000000, upper=100000000)
29
+ minimise_function
30
+ sinus_function <- function(x){
31
+ y = sin(x)
32
+ return(y)
33
+ }
34
+ minimise_function <- optimize(f=sinus_function, lower = -10000000, upper=100000000)
35
+ minimise_function
36
+ bivariate_function <- function(x,y){
37
+ z <- 2*x*(y**2)+2*(x**2)*y+x*y
38
+ return(z)
39
+ }
40
+ # 1. First try a few values of x, y and see how it affect z
41
+ x<- seq(-0.5,0.5, len=200)
42
+ y<- seq(-0.5,0.5, len=200)
43
+ z <- outer(x,y,bivariate_function)
44
+ persp(x,y,z, theta=-30,phi=15,ticktype="detailed")
45
+ image(x,y,z)
46
+ bivariate_function_vector <- function(vec){
47
+ x <- vec[1]
48
+ y <- vec[2]
49
+ z <- 2*x*(y**2)+2*(x**2)*y+x*y
50
+ return(z)
51
+ }
52
+ minimise_function_bivariate <- optimr(par = c(0.5,0.5), bivariate_function_vector, control=list(fnscale=-1))
53
+ minimise_function_bivariate$par
54
+ minimise_function_bivariate <- optimr(par = c(0.5,0.5), bivariate_function_vector)
55
+ minimise_function_bivariate$par
56
+ minimise_function_bivariate$par
57
+ minimise_function_bivariate <- optimr(par = c(0.5,0.5), bivariate_function)
58
+ minimise_function_bivariate <- optimr(par = c(0.5,0.5), bivariate_function_vector)
59
+ minimise_function_bivariate$par
60
+ bivariate_function_vector <- function(vec){
61
+ x <- vec[1]
62
+ y <- vec[2]
63
+ z <- (1-x)^2 + 100*(y-x^2)
64
+ return(z)
65
+ }
66
+ minimise_function_bivariate <- optimr(par = c(0,0), bivariate_function_vector)
67
+ minimise_function_bivariate$par
68
+ bivariate_function_vector <- function(vec){
69
+ x <- vec[1]
70
+ y <- vec[2]
71
+ z <- (1-x)^2 + 100*(y-x^2)^2
72
+ return(z)
73
+ }
74
+ minimise_function_bivariate <- optimr(par = c(0,0), bivariate_function_vector)
75
+ minimise_function_bivariate$par
76
+ remvove(list=ls())
77
+ remove(list=ls())
78
+ getwd()
79
+ setwd("/Users/houdanaitelbarj/Desktop/PhoneAddiction/analysis/structural")
80
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
81
+ # Setup
82
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
83
+ # Import plotting functions and constants from lib file
84
+ source('input/lib/r/ModelFunctions.R')
85
+ # Import data
86
+ df <- import_data()
87
+ param %<>%
88
+ list.merge(
89
+ #get_opt(df),
90
+ get_taus(df, winsorize=winsorize, full=full),
91
+ get_mispredict(df),
92
+ get_ideal(df),
93
+ get_predict(df),
94
+ get_wtp(df),
95
+ get_avg_use(df),
96
+ get_fb(df),
97
+ get_limit_last_week(df)
98
+ )
99
+ param <- param_initial
100
+ param %<>%
101
+ list.merge(
102
+ #get_opt(df),
103
+ get_taus(df, winsorize=winsorize, full=full),
104
+ get_mispredict(df),
105
+ get_ideal(df),
106
+ get_predict(df),
107
+ get_wtp(df),
108
+ get_avg_use(df),
109
+ get_fb(df),
110
+ get_limit_last_week(df)
111
+ )
112
+ winsorize=F
113
+ full=F
114
+ param %<>%
115
+ list.merge(
116
+ #get_opt(df),
117
+ get_taus(df, winsorize=winsorize, full=full),
118
+ get_mispredict(df),
119
+ get_ideal(df),
120
+ get_predict(df),
121
+ get_wtp(df),
122
+ get_avg_use(df),
123
+ get_fb(df),
124
+ get_limit_last_week(df)
125
+ )
126
+ View(param)
127
+ param %<>%
128
+ solve_sys_eq_1 %>%
129
+ as.list %>%
130
+ list.merge(param)
131
+ # Solve system of equations #2
132
+ param %<>%
133
+ solve_sys_eq_2(display_warning=display_warning) %>%
134
+ as.list %>%
135
+ list.merge(param)
136
+ display_warning=FALSE
137
+ # Solve system of equation #1
138
+ param %<>%
139
+ solve_sys_eq_1 %>%
140
+ as.list %>%
141
+ list.merge(param)
142
+ # Solve system of equations #2
143
+ param %<>%
144
+ solve_sys_eq_2(display_warning=display_warning) %>%
145
+ as.list %>%
146
+ list.merge(param)
147
+ param %<>%
148
+ solve_sys_eq_3 %>%
149
+ as.list %>%
150
+ list.merge(param)
151
+ # Solve for individual effects
152
+ tau_L_2_spec <- find_tau_L2_spec(df)
153
+ tau_tilde_spec <- find_tau_L3_spec(df)
154
+ x_ss_i_data <- calculate_x_ss_i_spec(df)
155
+ param %<>%
156
+ solve_effects_individual(x_ss_i_data= x_ss_i_data, tau_tilde_L=tau_tilde_spec, tau_L_2=tau_L_2_spec, w=df$w)%>%
157
+ as.list %>%
158
+ list.merge(param)
159
+ rho <- param[['rho']]
160
+ lambda <- param[['lambda']]
161
+ rho_res <- param[['rho_res']]
162
+ lambda_res <- param[['lambda_res']]
163
+ delta <- param[['delta']]
164
+ alpha <- param[['alpha']]
165
+ omega <- param[['omega']]
166
+ omega_est <- param[['omega_est']]
167
+ mispredict <- param[['mispredict']]
168
+ d_L <- param[['d_L']]
169
+ d_CL <- param[['d_CL']]
170
+ eta <- param[['eta']]
171
+ zeta <- param[['zeta']]
172
+ naivete <- param[['naivete']]
173
+ gamma_L_effect <- param[['gamma_L_effect']]
174
+ gamma_tilde_L_effect <- param[['gamma_tilde_L_effect']]
175
+ gamma_tilde_L_effect_omega <- param[['gamma_tilde_L_effect_omega']]
176
+ gamma_L_effect_omega <- param[['gamma_L_effect_omega']]
177
+ gamma_L_effect_multiple <- param[['gamma_L_effect_multiple']]
178
+ gamma_tilde_L_effect_multiple <- param[['gamma_tilde_L_effect_multiple']]
179
+ gamma_L <- param[['gamma_L']]
180
+ gamma_tilde_L <- param[['gamma_tilde_L']]
181
+ gamma_tilde_L_omega <- param[['gamma_tilde_L_omega']]
182
+ gamma_L_omega <- param[['gamma_L_omega']]
183
+ gamma_tilde_L_multiple <- param[['gamma_tilde_L_multiple']]
184
+ gamma_L_multiple <- param[['gamma_L_multiple']]
185
+ gamma_B <- param[['gamma_B']]
186
+ gamma_tilde_B <- param[['gamma_tilde_B']]
187
+ gamma_tilde_B_multiple <- param[['gamma_tilde_B_multiple']]
188
+ gamma_B_multiple <- param[['gamma_B_multiple']]
189
+ eta_res <- param[['eta_res']]
190
+ zeta_res <- param[['zeta_res']]
191
+ naivete_res <- param[['naivete_res']]
192
+ gamma_L_effect_res <- param[['gamma_L_effect_res']]
193
+ gamma_tilde_L_effect_res <- param[['gamma_tilde_L_effect_res']]
194
+ gamma_tilde_L_effect_omega_res <- param[['gamma_tilde_L_effect_omega_res']]
195
+ gamma_L_effect_omega_res <- param[['gamma_L_effect_omega_res']]
196
+ gamma_tilde_L_effect_multiple_res <- param[['gamma_tilde_L_effect_multiple_res']]
197
+ gamma_L_res <- param[['gamma_L_res']]
198
+ gamma_L_omega_res <- param[['gamma_L_omega_res']]
199
+ gamma_L_multiple_res <- param[['gamma_L_multiple_res']]
200
+ gamma_B_res <- param[['gamma_B_res']]
201
+ gamma_B_multiple_res <- param[['gamma_B_multiple_res']]
202
+ tau_L_2_signed <- param[['tau_L_2']]*-1
203
+ # Gamma-spec
204
+ term1 <- (1-alpha)*delta*rho
205
+ term2 <- term1*(1+lambda)
206
+ term3 <- (eta*lambda + zeta*(1 - lambda))*(rho*tau_L_2/omega)
207
+ num <- eta*tau_L_2/omega - term1*term3 - term2*naivete
208
+ denom <- 1 - term2
209
+ num_omega <- eta*tau_L_2/omega_est - term1*term3 - term2*naivete
210
+ gamma_spec <- num/denom
211
+ gamma_spec_omega <- num_omega/denom
212
+ gamma_tilde_spec <- gamma_spec - naivete
213
+ gamma_tilde_spec_omega <- gamma_spec_omega - naivete
214
+ tau_L_2 <- param[['tau_L_2']]
215
+ # Gamma-spec
216
+ term1 <- (1-alpha)*delta*rho
217
+ term2 <- term1*(1+lambda)
218
+ term3 <- (eta*lambda + zeta*(1 - lambda))*(rho*tau_L_2/omega)
219
+ num <- eta*tau_L_2/omega - term1*term3 - term2*naivete
220
+ denom <- 1 - term2
221
+ num_omega <- eta*tau_L_2/omega_est - term1*term3 - term2*naivete
222
+ gamma_spec <- num/denom
223
+ gamma_spec_omega <- num_omega/denom
224
+ gamma_tilde_spec <- gamma_spec - naivete
225
+ gamma_tilde_spec_omega <- gamma_spec_omega - naivete
226
+ intercept_spec <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta)
227
+ intercept_het_L_effect <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta)
228
+ intercept_het_B <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B, gamma_B, alpha, rho, lambda, mispredict, eta, zeta)
229
+ intercept_het_L <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L, gamma_L, alpha, rho, lambda, mispredict, eta, zeta)
230
+ intercept_spec_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec_omega, gamma_spec_omega, alpha, rho, lambda, mispredict, eta, zeta)
231
+ intercept_het_L_effect_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_omega, gamma_L_effect_omega, alpha, rho, lambda, mispredict, eta, zeta)
232
+ intercept_het_L_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_omega, gamma_L_omega, alpha, rho, lambda, mispredict, eta, zeta)
233
+ intercept_het_L_effect_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_multiple, gamma_L_effect_multiple, alpha, rho, lambda, mispredict, eta, zeta)
234
+ intercept_het_B_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B_multiple, gamma_B_multiple, alpha, rho, lambda, mispredict, eta, zeta)
235
+ intercept_het_L_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_multiple, gamma_L_multiple, alpha, rho, lambda, mispredict, eta, zeta)
236
+ intercept_het_L_effect_eta_high <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=1.1)
237
+ intercept_het_L_effect_eta_low <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=0.9)
238
+ x_ss_spec <- calculate_steady_state(param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec)
239
+ x_ss_zero_un <- calculate_steady_state(param, 0, 0, alpha, rho, lambda, 0, eta, zeta, intercept_spec)
240
+ x_ss_zero <- ifelse(x_ss_zero_un<0, 0, x_ss_zero_un)
241
+ delta_x <- x_ss_spec - x_ss_zero
242
+ x_ss_spec_w <- weighted.mean(x_ss_spec, w, na.rm=T)
243
+ w=df$w
244
+ x_ss_spec_w <- weighted.mean(x_ss_spec, w, na.rm=T)
245
+ rho <- param[['rho']]
246
+ lambda <- param[['lambda']]
247
+ rho_res <- param[['rho_res']]
248
+ lambda_res <- param[['lambda_res']]
249
+ delta <- param[['delta']]
250
+ alpha <- param[['alpha']]
251
+ omega <- param[['omega']]
252
+ omega_est <- param[['omega_est']]
253
+ mispredict <- param[['mispredict']]
254
+ d_L <- param[['d_L']]
255
+ d_CL <- param[['d_CL']]
256
+ eta <- param[['eta']]
257
+ zeta <- param[['zeta']]
258
+ naivete <- param[['naivete']]
259
+ gamma_L_effect <- param[['gamma_L_effect']]
260
+ gamma_tilde_L_effect <- param[['gamma_tilde_L_effect']]
261
+ gamma_tilde_L_effect_omega <- param[['gamma_tilde_L_effect_omega']]
262
+ gamma_L_effect_omega <- param[['gamma_L_effect_omega']]
263
+ gamma_L_effect_multiple <- param[['gamma_L_effect_multiple']]
264
+ gamma_tilde_L_effect_multiple <- param[['gamma_tilde_L_effect_multiple']]
265
+ gamma_L <- param[['gamma_L']]
266
+ gamma_tilde_L <- param[['gamma_tilde_L']]
267
+ gamma_tilde_L_omega <- param[['gamma_tilde_L_omega']]
268
+ gamma_L_omega <- param[['gamma_L_omega']]
269
+ gamma_tilde_L_multiple <- param[['gamma_tilde_L_multiple']]
270
+ gamma_L_multiple <- param[['gamma_L_multiple']]
271
+ gamma_B <- param[['gamma_B']]
272
+ gamma_tilde_B <- param[['gamma_tilde_B']]
273
+ gamma_tilde_B_multiple <- param[['gamma_tilde_B_multiple']]
274
+ gamma_B_multiple <- param[['gamma_B_multiple']]
275
+ eta_res <- param[['eta_res']]
276
+ zeta_res <- param[['zeta_res']]
277
+ naivete_res <- param[['naivete_res']]
278
+ gamma_L_effect_res <- param[['gamma_L_effect_res']]
279
+ gamma_tilde_L_effect_res <- param[['gamma_tilde_L_effect_res']]
280
+ gamma_tilde_L_effect_omega_res <- param[['gamma_tilde_L_effect_omega_res']]
281
+ gamma_L_effect_omega_res <- param[['gamma_L_effect_omega_res']]
282
+ gamma_tilde_L_effect_multiple_res <- param[['gamma_tilde_L_effect_multiple_res']]
283
+ gamma_L_res <- param[['gamma_L_res']]
284
+ gamma_L_omega_res <- param[['gamma_L_omega_res']]
285
+ gamma_L_multiple_res <- param[['gamma_L_multiple_res']]
286
+ gamma_B_res <- param[['gamma_B_res']]
287
+ gamma_B_multiple_res <- param[['gamma_B_multiple_res']]
288
+ tau_L_2 <- param[['tau_L_2']]
289
+ tau_L_2_signed <- param[['tau_L_2']]*-1
290
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
291
+ # Calculate individual intercepts and steady states under different strategies - Unrestricted alpha
292
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
293
+ # Gamma-spec
294
+ term1 <- (1-alpha)*delta*rho
295
+ term2 <- term1*(1+lambda)
296
+ term3 <- (eta*lambda + zeta*(1 - lambda))*(rho*tau_L_2/omega)
297
+ num <- eta*tau_L_2/omega - term1*term3 - term2*naivete
298
+ denom <- 1 - term2
299
+ num_omega <- eta*tau_L_2/omega_est - term1*term3 - term2*naivete
300
+ gamma_spec <- num/denom
301
+ gamma_spec_omega <- num_omega/denom
302
+ gamma_tilde_spec <- gamma_spec - naivete
303
+ gamma_tilde_spec_omega <- gamma_spec_omega - naivete
304
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
305
+ # Calculate individual intercepts and steady states under different strategies
306
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
307
+ intercept_spec <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta)
308
+ intercept_het_L_effect <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta)
309
+ intercept_het_B <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B, gamma_B, alpha, rho, lambda, mispredict, eta, zeta)
310
+ intercept_het_L <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L, gamma_L, alpha, rho, lambda, mispredict, eta, zeta)
311
+ intercept_spec_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec_omega, gamma_spec_omega, alpha, rho, lambda, mispredict, eta, zeta)
312
+ intercept_het_L_effect_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_omega, gamma_L_effect_omega, alpha, rho, lambda, mispredict, eta, zeta)
313
+ intercept_het_L_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_omega, gamma_L_omega, alpha, rho, lambda, mispredict, eta, zeta)
314
+ intercept_het_L_effect_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_multiple, gamma_L_effect_multiple, alpha, rho, lambda, mispredict, eta, zeta)
315
+ intercept_het_B_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B_multiple, gamma_B_multiple, alpha, rho, lambda, mispredict, eta, zeta)
316
+ intercept_het_L_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_multiple, gamma_L_multiple, alpha, rho, lambda, mispredict, eta, zeta)
317
+ intercept_het_L_effect_eta_high <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=1.1)
318
+ intercept_het_L_effect_eta_low <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=0.9)
319
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
320
+ # Calculate individual counterfactuals
321
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
322
+ x_ss_spec <- calculate_steady_state(param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec)
323
+ x_ss_zero_un <- calculate_steady_state(param, 0, 0, alpha, rho, lambda, 0, eta, zeta, intercept_spec)
324
+ x_ss_zero <- ifelse(x_ss_zero_un<0, 0, x_ss_zero_un)
325
+ delta_x <- x_ss_spec - x_ss_zero
326
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
327
+ # Calculate individual intercepts and steady states under different strategies - Restricted alpha
328
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
329
+ # Gamma-spec
330
+ alpha_res <- 1
331
+ term1_res <- (1-alpha_res)*delta*rho_res
332
+ term2_res <- term1_res*(1+lambda_res)
333
+ term3_res <- (eta_res*lambda_res + zeta_res*(1 - lambda_res))*(rho_res*tau_L_2/omega)
334
+ num_res <- eta_res*tau_L_2/omega - term1_res*term3_res - term2_res*naivete_res
335
+ denom_res <- 1 - term2_res
336
+ num_omega_res <- eta_res*tau_L_2/omega_est - term1_res*term3_res - term2_res*naivete_res
337
+ gamma_spec_res <- num_res/denom_res
338
+ gamma_spec_omega_res <- num_omega_res/denom_res
339
+ gamma_tilde_spec_res <- gamma_spec_res - naivete_res
340
+ gamma_tilde_spec_omega_res <- gamma_spec_omega_res - naivete_res
341
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
342
+ # Calculate individual intercepts and steady states under different strategies
343
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
344
+ intercept_spec_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec_res, gamma_spec_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
345
+ intercept_het_L_effect_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_res, gamma_L_effect_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
346
+ intercept_het_B_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B, gamma_B_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
347
+ intercept_het_L_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L, gamma_L_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
348
+ intercept_spec_omega_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec_omega_res, gamma_spec_omega_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
349
+ intercept_het_L_effect_omega_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_omega_res, gamma_L_effect_omega_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
350
+ intercept_het_L_omega_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_omega, gamma_L_omega_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
351
+ intercept_het_L_effect_multiple_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_multiple, gamma_L_effect_multiple, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
352
+ intercept_het_B_multiple_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B_multiple, gamma_B_multiple, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
353
+ intercept_het_L_multiple_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_multiple, gamma_L_multiple, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res)
354
+ intercept_het_L_effect_eta_high_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res, eta_scale=1.1)
355
+ intercept_het_L_effect_eta_low_res <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res, eta_scale=0.9)
356
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
357
+ # Calculate individual counterfactuals
358
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
359
+ x_ss_spec_res <- calculate_steady_state(param, gamma_tilde_spec_res, gamma_spec_res, alpha = 1, rho_res, lambda_res, mispredict, eta = eta_res, zeta = zeta_res, intercept_spec_res)
360
+ x_ss_zero_un_res <- calculate_steady_state(param, 0, 0, alpha = 1, rho_res, lambda_res, 0, eta = eta_res, zeta = zeta_res, intercept_spec_res)
361
+ x_ss_zero_res <- ifelse(x_ss_zero_un_res<0, 0, x_ss_zero_un_res)
362
+ delta_x_res <- x_ss_spec_res - x_ss_zero_res
363
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
364
+ # Compute population averages
365
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
366
+ x_ss_spec_w <- weighted.mean(x_ss_spec, w, na.rm=T)
367
+ gamma_tilde_spec_w <- weighted.mean(gamma_tilde_spec, w, na.rm=T)
368
+ gamma_spec_w <- weighted.mean(gamma_spec, w, na.rm=T)
369
+ gamma_spec_omega_w <- weighted.mean(gamma_spec_omega, w, na.rm=T)
370
+ delta_x_spec <- weighted.mean(delta_x, w, na.rm=T)
371
+ x_ss_i_data <- weighted.mean(x_ss_i_data, w, na.rm=T)
372
+ remove(list=ls())
373
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
374
+ # Setup
375
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
376
+ # Import plotting functions and constants from lib file
377
+ source('input/lib/r/ModelFunctions.R')
378
+ # Import data
379
+ df <- import_data()
380
+ param <- param_initial
381
+ winsorize=F, full=F, display_warning=FALS
382
+ winsorize=F
383
+ full=F
384
+ display_warning=FALSE
385
+ param %<>%
386
+ list.merge(
387
+ #get_opt(df),
388
+ get_taus(df, winsorize=winsorize, full=full),
389
+ get_mispredict(df),
390
+ get_ideal(df),
391
+ get_predict(df),
392
+ get_wtp(df),
393
+ get_avg_use(df),
394
+ get_fb(df),
395
+ get_limit_last_week(df)
396
+ )
397
+ # Solve system of equation #1
398
+ param %<>%
399
+ solve_sys_eq_1 %>%
400
+ as.list %>%
401
+ list.merge(param)
402
+ # Solve system of equations #2
403
+ param %<>%
404
+ solve_sys_eq_2(display_warning=display_warning) %>%
405
+ as.list %>%
406
+ list.merge(param)
407
+ # Solve system of equations #3
408
+ param %<>%
409
+ solve_sys_eq_3 %>%
410
+ as.list %>%
411
+ list.merge(param)
412
+ # Solve for individual effects
413
+ tau_L_2_spec <- find_tau_L2_spec(df)
414
+ tau_tilde_spec <- find_tau_L3_spec(df)
415
+ x_ss_i_data <- calculate_x_ss_i_spec(df)
416
+ param %<>%
417
+ solve_effects_individual(x_ss_i_data= x_ss_i_data, tau_tilde_L=tau_tilde_spec, tau_L_2=tau_L_2_spec, w=df$w)%>%
418
+ as.list %>%
419
+ list.merge(param)
420
+ tau_tilde_L=tau_tilde_spec
421
+ tau_L_2=tau_L_2_spec
422
+ w=df$w
423
+ rho <- param[['rho']]
424
+ lambda <- param[['lambda']]
425
+ rho_res <- param[['rho_res']]
426
+ lambda_res <- param[['lambda_res']]
427
+ delta <- param[['delta']]
428
+ alpha <- param[['alpha']]
429
+ omega <- param[['omega']]
430
+ omega_est <- param[['omega_est']]
431
+ mispredict <- param[['mispredict']]
432
+ d_L <- param[['d_L']]
433
+ d_CL <- param[['d_CL']]
434
+ eta <- param[['eta']]
435
+ zeta <- param[['zeta']]
436
+ naivete <- param[['naivete']]
437
+ gamma_L_effect <- param[['gamma_L_effect']]
438
+ gamma_tilde_L_effect <- param[['gamma_tilde_L_effect']]
439
+ gamma_tilde_L_effect_omega <- param[['gamma_tilde_L_effect_omega']]
440
+ gamma_L_effect_omega <- param[['gamma_L_effect_omega']]
441
+ gamma_L_effect_multiple <- param[['gamma_L_effect_multiple']]
442
+ gamma_tilde_L_effect_multiple <- param[['gamma_tilde_L_effect_multiple']]
443
+ gamma_L <- param[['gamma_L']]
444
+ gamma_tilde_L <- param[['gamma_tilde_L']]
445
+ gamma_tilde_L_omega <- param[['gamma_tilde_L_omega']]
446
+ gamma_L_omega <- param[['gamma_L_omega']]
447
+ gamma_tilde_L_multiple <- param[['gamma_tilde_L_multiple']]
448
+ gamma_L_multiple <- param[['gamma_L_multiple']]
449
+ gamma_B <- param[['gamma_B']]
450
+ gamma_tilde_B <- param[['gamma_tilde_B']]
451
+ gamma_tilde_B_multiple <- param[['gamma_tilde_B_multiple']]
452
+ gamma_B_multiple <- param[['gamma_B_multiple']]
453
+ eta_res <- param[['eta_res']]
454
+ zeta_res <- param[['zeta_res']]
455
+ naivete_res <- param[['naivete_res']]
456
+ gamma_L_effect_res <- param[['gamma_L_effect_res']]
457
+ gamma_tilde_L_effect_res <- param[['gamma_tilde_L_effect_res']]
458
+ gamma_tilde_L_effect_omega_res <- param[['gamma_tilde_L_effect_omega_res']]
459
+ gamma_L_effect_omega_res <- param[['gamma_L_effect_omega_res']]
460
+ gamma_tilde_L_effect_multiple_res <- param[['gamma_tilde_L_effect_multiple_res']]
461
+ gamma_L_res <- param[['gamma_L_res']]
462
+ gamma_L_omega_res <- param[['gamma_L_omega_res']]
463
+ gamma_L_multiple_res <- param[['gamma_L_multiple_res']]
464
+ gamma_B_res <- param[['gamma_B_res']]
465
+ gamma_B_multiple_res <- param[['gamma_B_multiple_res']]
466
+ tau_L_2_signed <- param[['tau_L_2']]*-1
467
+ # Gamma-spec
468
+ num <- eta*tau_L_2/omega - (1-alpha)*delta*rho*(((eta-zeta)*tau_tilde_L/omega+zeta*rho*tau_L_2/omega) + (1+lambda)*mispredict*(-eta+(1-alpha)*delta*rho^2*((eta-zeta)*lambda+zeta)))
469
+ denom <- 1 - (1-alpha)*delta*rho*(1+lambda)
470
+ num_omega <- eta*tau_L_2/omega_est - (1-alpha)*delta*rho*(((eta-zeta)*tau_tilde_L/omega_est+zeta*rho*tau_L_2/omega) + (1+lambda)*mispredict*(-eta+(1-alpha)*delta*rho^2*((eta-zeta)*lambda+zeta)))
471
+ gamma_spec <- num/denom
472
+ gamma_spec_omega <- num_omega/denom
473
+ gamma_tilde_spec <- gamma_spec - naivete
474
+ gamma_tilde_spec_omega <- gamma_spec_omega - naivete
475
+ intercept_spec <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta)
476
+ intercept_het_L_effect <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta)
477
+ intercept_het_B <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B, gamma_B, alpha, rho, lambda, mispredict, eta, zeta)
478
+ intercept_het_L <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L, gamma_L, alpha, rho, lambda, mispredict, eta, zeta)
479
+ intercept_spec_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_spec_omega, gamma_spec_omega, alpha, rho, lambda, mispredict, eta, zeta)
480
+ intercept_het_L_effect_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_omega, gamma_L_effect_omega, alpha, rho, lambda, mispredict, eta, zeta)
481
+ intercept_het_L_omega <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_omega, gamma_L_omega, alpha, rho, lambda, mispredict, eta, zeta)
482
+ intercept_het_L_effect_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect_multiple, gamma_L_effect_multiple, alpha, rho, lambda, mispredict, eta, zeta)
483
+ intercept_het_B_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_B_multiple, gamma_B_multiple, alpha, rho, lambda, mispredict, eta, zeta)
484
+ intercept_het_L_multiple <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_multiple, gamma_L_multiple, alpha, rho, lambda, mispredict, eta, zeta)
485
+ intercept_het_L_effect_eta_high <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=1.1)
486
+ intercept_het_L_effect_eta_low <- calculate_intercept_spec(x_ss_i_data, param, gamma_tilde_L_effect, gamma_L_effect, alpha, rho, lambda, mispredict, eta, zeta, eta_scale=0.9)
487
+ x_ss_spec <- calculate_steady_state(param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec)
488
+ x_ss_spec <- calculate_steady_state(param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec)
489
+ calculate_steady_state <- function(param, gamma_tilde, gamma, alpha, rho, lambda, mispredict, eta, zeta, intercept=NA, eta_scale=1){
490
+ # Define
491
+ eta <- eta * eta_scale
492
+ delta <- param[['delta']]
493
+ p_B <- param[['p_B']]
494
+ # Calculate
495
+ p <- 0
496
+ term_pre <- (1 - (1-alpha)*delta*rho)
497
+ term1 <- intercept - p*term_pre
498
+ term2 <- (1-alpha)*delta*rho
499
+ term3 <- (eta - zeta) * mispredict + gamma_tilde*(1+lambda)
500
+ num <- term1 - term2*term3 + gamma
501
+ terma <- term_pre*(-eta - zeta * (rho / (1 - rho)))
502
+ termb <- (1-alpha)*delta*rho*zeta
503
+ denom <- terma + termb
504
+ print(paste0("denom: ", denom))
505
+ x_ss_calc <- num /denom
506
+ return(x_ss_calc)
507
+ }
508
+ x_ss_spec <- calculate_steady_state(param, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec)
509
+ x_ss_zero_un <- calculate_steady_state(param, 0, 0, alpha, rho, lambda, 0, eta, zeta, intercept_spec)
510
+ x_ss_zero <- ifelse(x_ss_zero_un<0, 0, x_ss_zero_un)
511
+ delta_x <- x_ss_spec - x_ss_zero
512
+ x_ss_spec_w <- weighted.mean(x_ss_spec, w, na.rm=T)
17/replication_package/code/analysis/structural/README.md ADDED
@@ -0,0 +1,6 @@
1
+ # README
2
+
3
+ This module estimates parameters and generates plots for our structural model.
4
+
5
+ `/code/` contains the following file:
6
+ * StructuralModel.R
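+
+ A minimal sketch of the estimation flow the script implements (the helpers `import_data()`, `estimate_model()`, `param_initial`, and `save_tex()` are defined in `input/lib/r/ModelFunctions.R`, which the script sources):
+
+ ```r
+ # Sketch only: mirrors the core calls made in code/StructuralModel.R
+ source('input/lib/r/ModelFunctions.R')
+
+ df         <- import_data()                      # load the analysis sample
+ param_full <- estimate_model(df, param_initial)  # solve for model parameters
+ save_tex(param_full, filename = "structural")    # export scalars for LaTeX
+ ```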
17/replication_package/code/analysis/structural/code/StructuralModel.R ADDED
@@ -0,0 +1,295 @@
1
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2
+ # Setup
3
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ # Import plotting functions and constants from lib file
6
+ source('input/lib/r/ModelFunctions.R')
7
+
8
+ # Import data
9
+ df <- import_data()
10
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11
+ # Nice scalars
12
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13
+
14
+ nice_scalars <- function(param){
15
+
16
+ limiteffectlastweeknice <- signif(param$limit_effect_last_week, digits=2) * -1
17
+ limiteffect <- signif(param$tau_L, digits=2) * -1
18
+ mispredictnice <- signif(param$mispredict, digits=2)
19
+
20
+ tautildenice <- signif(param$tau_tilde_B, digits=2)* -1
21
+
22
+
23
+ # pctirrationaltwo <- param$rho_tilde/param$rho
24
+ #pctirrationaltwo <- signif(pctirrationaltwo, digits=2)
25
+
26
+ mispredictpct <- param$mispredict/param$x_ss
27
+ mispredictpct <- signif(mispredictpct, digits=2)*100
28
+
29
+ pctreductiontemptation <- param$delta_x_temptation/ param$x_ss
30
+ pctreductiontemptationres <- param$delta_x_temptation_res/ param$x_ss
31
+
32
+ pctreductiontemptation <- signif(pctreductiontemptation, digits=2)*100
33
+ pctreductiontemptationres <- signif(pctreductiontemptationres, digits=2)*100
34
+
35
+
36
+ dLpercent <- param$d_L/100
37
+ dLpercent <- signif(dLpercent, digits=3)
38
+
39
+ dCLpercent <- param$d_CL/100
40
+ dCLpercent <- signif(dCLpercent, digits=2)
41
+
42
+ taubtwonice <- signif(param$tau_B_2, digits=2) * -1
43
+ taubtwofullnice <- signif(param$tau_B_2_full , digits=2) * -1
44
+ taubthreenice <- signif(param$tau_B_3, digits=2) * -1
45
+ taubfournice <- signif(param$tau_B_4, digits=2) * -1
46
+ taubfivenice <- signif(param$tau_B_5, digits=2) * -1
47
+
48
+ gammaLeffectnice <- signif(param$gamma_L_effect, digits=1)
49
+ gammaLnice <- signif(param$gamma_L, digits=2)
50
+ gammaBnice <- signif(param$gamma_B, digits=2)
51
+
52
+ naivetenice <- signif(param$naivete, digits=2)
53
+ gammaLeffectresnice <- signif(param$gamma_L_effect_res, digits=1)
54
+ gammaLresnice <- signif(param$gamma_L_res, digits=2)
55
+ gammaBresnice <- signif(param$gamma_B_res, digits=2)
56
+
57
+ naiveteresnice <- signif(param$naivete_res, digits=2)
58
+
59
+ attritionratenice <- signif(param$attritionrate, digits=2)*100
60
+
61
+
62
+ dLnice <- signif(param$d_L, digits=2)*-1
63
+ dCLnice <- signif(param$d_CL, digits=2)*-1
64
+
65
+ underestimatetemp <- format(round(param$underestimatetemp,3), digits=2)
66
+
67
+ tautildeBtwothreenice <- signif(param$tau_tilde_B_3_2, digits=2)*-1
68
+
69
+ MPLStwonice <- signif(param$MPL_S2, digits=2)*-1
70
+
71
+ tauLtwosigned <- signif(param$tau_L_2)*-1
72
+
73
+
74
+
75
+
76
+ #Have hourly variables
77
+ gammaBnicehour <- gammaBnice*60
78
+ gammaLnicehour <- gammaLnice*60
79
+ gammaLeffectnicehour <- gammaLeffectnice*60
80
+ naivetenicehour <- naivetenice*60
81
+ gammaBresnicehour <- gammaBresnice*60
82
+ gammaLresnicehour <- gammaLresnice*60
83
+ gammaLeffectresnicehour <- gammaLeffectresnice*60
84
+ naiveteresnicehour <- naiveteresnice*60
85
+ taubtwohour <- taubtwonice*60
86
+
87
+
88
+
89
+ # Return
90
+ solution <- list(
91
+ mispredictnice = mispredictnice,
92
+ tautildenice = tautildenice,
93
+ taubtwonice = taubtwonice,
94
+ gammaLeffectnice = gammaLeffectnice,
95
+ gammaLnice = gammaLnice,
96
+ gammaBnice = gammaBnice,
97
+ naivetenice = naivetenice,
98
+ gammaLeffectnicehour = gammaLeffectnicehour,
99
+ gammaLnicehour = gammaLnicehour,
100
+ gammaBnicehour = gammaBnicehour,
101
+ naivetenicehour = naivetenicehour,
102
+ taubtwohour = taubtwohour,
103
+ gammaLeffectresnice = gammaLeffectresnice,
104
+ gammaLresnice = gammaLresnice,
105
+ gammaBresnice = gammaBresnice,
106
+ naiveteresnice = naiveteresnice,
107
+ gammaLeffectresnicehour = gammaLeffectresnicehour,
108
+ gammaLresnicehour = gammaLresnicehour,
109
+ gammaBresnicehour = gammaBresnicehour,
110
+ naiveteresnicehour = naiveteresnicehour,
111
+ dLnice = dLnice,
112
+ dCLnice = dCLnice,
113
+ dLpercent = dLpercent,
114
+ dCLpercent = dCLpercent,
115
+ underestimatetemp = underestimatetemp,
116
+ tautildeBtwothreenice = tautildeBtwothreenice,
117
+ limiteffect = limiteffect,
118
+ attritionratenice = attritionratenice,
119
+ taubthreenice = taubthreenice,
120
+ taubfournice = taubfournice,
121
+ taubfivenice = taubfivenice,
122
+ pctreductiontemptation = pctreductiontemptation,
123
+ pctreductiontemptationres = pctreductiontemptationres,
124
+ MPLStwonice = MPLStwonice,
125
+ mispredictpct = mispredictpct,
126
+ taubtwofullnice = taubtwofullnice,
127
+ tauLtwosigned = tauLtwosigned,
128
+ limiteffectlastweeknice = limiteffectlastweeknice)
129
+
130
+ return(solution)
131
+ }
132
+
133
+
134
+
135
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
136
+ # Full model, taub2=full
137
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
138
+
139
+ # Define constants
140
+ param <- param_initial
141
+
142
+ # Estimate model
143
+ param_full <- estimate_model(df, param, full=T, display_warning=F)
144
+
145
+ # Add some auto-import figures
146
+ param_additional_full_taub2 <-
147
+ param_full %>%
148
+ as.list %>%
149
+ list.merge(param_full)
150
+
151
+ save_tex(param_additional_full_taub2, filename="structural_fulltaub2", suffix="fulltaubtwo")
152
+
153
+
154
+ df$w <- 1
155
+
156
+ results <- vector(mode = "list", length = size)
157
+
158
+ results <- run_boot_procedure(run_boot_iter_full)
159
+
160
+ # Get bootstrap distribution
161
+ bottom <- lapply(results, find_bottom)
162
+ top <- lapply(results, find_top)
163
+
164
+ save_boot_tex_percentile(bottom, top,
165
+ suffix="bootfulltaubtwo",
166
+ filename="structural_boot_fulltaubtwo")
167
+
168
+
169
+
170
+
171
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
172
+ # Full model, taub2 half period
173
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
174
+
175
+ # Define constants
176
+ param <- param_initial
177
+
178
+ # Estimate model
179
+ param_full <- estimate_model(df, param)
180
+
181
+ print(param_full$eta)
182
+ print(param_full$zeta)
183
+ check_steady_state(param_full)
184
+
185
+
186
+
187
+
188
+ # Add some auto-import figures
189
+ param_additional <-
190
+ param_full %>%
191
+ as.list %>%
192
+ list.merge(param_full)
193
+
194
+ save_tex(param_additional, filename="structural")
195
+
196
+ # Add some auto-import figures
197
+ param_additional_two <-
198
+ param_full %>%
199
+ as.list %>%
200
+ list.merge(param_full)
201
+
202
+
203
+ save_tex2(param_additional_two, filename="structural_two", suffix="twodigits")
204
+
205
+
206
+ param_additional_nice <-
207
+ param_full %>%
208
+ nice_scalars %>%
209
+ as.list %>%
210
+ list.merge(param_full)
211
+
212
+ save_tex_nice(param_additional_nice, filename="structural_nice", suffix="nice")
213
+
214
+
215
+
216
+
217
+
218
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
219
+ # Balanced model
220
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
221
+
222
+
223
+ # Add weights
224
+ df %<>% balance_data(magnitude=3)
225
+
226
+ # Define constants
227
+ param <- param_initial
228
+ # Estimate model
229
+ param_balanced <- estimate_model(df, param, winsorize=T)
230
+
231
+
232
+ # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
233
+ # # Bootstrap model no perceived habit formation
234
+ # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
235
+ # Revert to unbalanced
236
+ df$w <- 1
237
+
238
+ results <- vector(mode = "list", length = size)
239
+
240
+
241
+ results <- run_boot_procedure(run_boot_iter)
242
+
243
+ # Get bootstrap distribution
244
+ bottom <- lapply(results, find_bottom)
245
+ top <- lapply(results, find_top)
246
+
247
+ plot_time_effects(param_full, bottom, top, filename="structural_time_effects_plot")
248
+
249
+ save_boot_tex_percentile(bottom, top,
250
+ suffix="boot",
251
+ filename="structural_boot")
252
+
253
+ plot_decomposition_boot(param_full, bottom, top,
254
+ filename="structural_decomposition_plot_boot")
255
+
256
+ plot_decomposition_boot_unique(param_full, bottom, top,
257
+ filename="structural_decomposition_plot_boot_restricted")
258
+
259
+ plot_decomposition_boot_etas(param_full, bottom, top,
260
+ filename="structural_decomposition_plot_boot_restricted_etas")
261
+
262
+ plot_time_effects_both_est(param_full, bottom, top, filename="time_effects_both_est")
263
+
264
+
265
+
266
+
267
+
268
+
269
+ # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
270
+ # # Bootstrap balanced model no perceived
271
+ # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
272
+ results_bal <- vector(mode = "list", length = size)
273
+
274
+
275
+ # CHANGE MAGNITUDE OF WEIGHTS ONCE IT'S SORTED!
276
+ results_bal <- run_boot_procedure(run_boot_iter_bal)
277
+
278
+
279
+ # Get bootstrap distribution
280
+ median_bal <- lapply(results_bal, median, na.rm = T)
281
+ sdevs_bal <- lapply(results_bal, sd, na.rm = T)
282
+ bottom_bal <- lapply(results_bal, find_bottom)
283
+ top_bal <- lapply(results_bal, find_top)
284
+
285
+ save_tex(param_balanced, filename="balanced_median", suffix="balancedmedian")
286
+
287
+ #For restricted model
288
+
289
+ plot_time_effects_bal(param_full, param_balanced, bottom, top, bottom_bal, top_bal, filename="time_effects_balanced")
290
+ plot_time_effects_both(param_full, param_balanced, bottom, top, bottom_bal, top_bal, filename="time_effects_both")
291
+
292
+
293
+ save_boot_tex_percentile(bottom_bal, top_bal,
294
+ suffix="balanced",
295
+ filename="balanced_boot")
17/replication_package/code/analysis/structural/input.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aacb5e3a47846afaf251dbe069f5cee136e1255fe4ec43721a033fafc1d837d
3
+ size 812
17/replication_package/code/analysis/structural/make.py ADDED
@@ -0,0 +1,67 @@
1
+ ###################
2
+ ### ENVIRONMENT ###
3
+ ###################
4
+ import git
5
+ import imp
6
+ import os
7
+
8
+ ### SET DEFAULT PATHS
9
+ ROOT = '../..'
10
+
11
+ PATHS = {
12
+ 'root' : ROOT,
13
+ 'lib' : os.path.join(ROOT, 'lib'),
14
+ 'config' : os.path.join(ROOT, 'config.yaml'),
15
+ 'config_user' : os.path.join(ROOT, 'config_user.yaml'),
16
+ 'input_dir' : 'input',
17
+ 'external_dir' : 'external',
18
+ 'output_dir' : 'output',
19
+ 'output_local_dir' : 'output_local',
20
+ 'makelog' : 'log/make.log',
21
+ 'output_statslog' : 'log/output_stats.log',
22
+ 'source_maplog' : 'log/source_map.log',
23
+ 'source_statslog' : 'log/source_stats.log',
24
+ }
25
+
26
+ ### LOAD GSLAB MAKE
27
+ f, path, desc = imp.find_module('gslab_make', [PATHS['lib']])
28
+ gs = imp.load_module('gslab_make', f, path, desc)
29
+
30
+ ### LOAD CONFIG USER
31
+ PATHS = gs.update_paths(PATHS)
32
+ gs.update_executables(PATHS)
33
+
34
+ ############
35
+ ### MAKE ###
36
+ ############
37
+
38
+ ### START MAKE
39
+ gs.remove_dir(['input', 'external'])
40
+ gs.clear_dir(['output', 'log', 'temp'])
41
+ gs.start_makelog(PATHS)
42
+
43
+ ### GET INPUT FILES
44
+ inputs = gs.link_inputs(PATHS, ['input.txt'])
45
+ # gs.write_source_logs(PATHS, inputs + externals)
46
+ # gs.get_modified_sources(PATHS, inputs + externals)
47
+
48
+ ### RUN SCRIPTS
49
+ """
50
+ Critical
51
+ --------
52
+ Many of the Stata analysis scripts recode variables using
53
+ the `recode` command. Double-check all `recode` commands
54
+ to confirm recoding is correct, especially when reusing
55
+ code for a different experiment version.
56
+ """
57
+
58
+ gs.run_r(PATHS, program = 'code/StructuralModel.R')
59
+
60
+ ### LOG OUTPUTS
61
+ gs.log_files_in_output(PATHS)
62
+
63
+ ### CHECK FILE SIZES
64
+ #gs.check_module_size(PATHS)
65
+
66
+ ### END MAKE
67
+ gs.end_makelog(PATHS)
17/replication_package/code/analysis/treatment_effects/README.md ADDED
@@ -0,0 +1,22 @@
1
+ # README
2
+
3
+ This module produces model-free estimates of treatment effects.
4
+
5
+ `/code/` contains the following files:
6
+ * Beliefs.do (compares actual treatment effect with predicted treatment effect)
7
+
8
+ * CommitmentResponse.do (plots how treatment effects differ by the SMS addiction scale and other survey indicators)
9
+
10
+ * FDRTable.do (estimates how treatment effects differ by the SMS addiction scale and other indicators, with false-discovery-rate adjustment; also plots some descriptive statistics)
11
+
12
+ * HabitFormation.do (compares actual and predicted usage)
13
+
14
+ * Heterogeneity.do (plots heterogeneous treatment effects)
15
+
16
+ * HeterogeneityInstrumental.do (plots heterogeneous treatment effects from instrumented specifications)
17
+
18
+ * ModelHeterogeneity.R (generates other heterogeneity plots, some temptation plots)
19
+
20
+ * SurveyValidation.do (plots effect of rewarding accurate usage prediction on usage prediction accuracy)
21
+
22
+ The script `ModelHeterogeneity.R` requires the dataset `AnalysisUser.dta` when calling the function `get_opt()`, which computes the number of users who opted out of the limit functionality. Because this dataset contains PII, it has been omitted from this replication package, and the call to `get_opt()` (l.1396) has been commented out so that the module still runs end to end; a possible replacement stub is sketched below.
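+
+ If you re-enable that call without access to the PII data, a minimal stub such as the sketch below could stand in; the returned element name `opt_out` is a guess, not the package's actual interface:
+
+ ```r
+ # Hypothetical stub for the omitted PII-dependent helper: returns a
+ # placeholder instead of counting limit opt-outs from AnalysisUser.dta.
+ get_opt <- function(df) {
+   list(opt_out = NA_real_)
+ }
+ ```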
17/replication_package/code/analysis/treatment_effects/code/Beliefs.do ADDED
@@ -0,0 +1,359 @@
1
+ // Naivete about past and future usage
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
+ yaml global STRATA = YAML.metadata.strata
17
+ end
18
+
19
+ program define_plot_settings
20
+ global CISPIKE_VERTICAL_GRAPHOPTS ///
21
+ ylabel(#6) ///
22
+ xsize(6.5) ysize(4.5)
23
+
24
+ global CISPIKE_HORIZONTAL_GRAPHOPTS ///
25
+ xlabel(#6) ///
26
+ xsize(6.5) ysize(8.5)
27
+
28
+ global CISPIKE_STACKED_GRAPHOPTS ///
29
+ xcommon row(2) ///
30
+ graphregion(color(white)) ///
31
+ xsize(6.5) ysize(8.5)
32
+
33
+ global CISPIKE_SETTINGS ///
34
+ spikecolor(maroon black navy gray) ///
35
+ cicolor(maroon black navy gray) ///
36
+ spike(msymbol(O)||msymbol(S)||msymbol(D)||msymbol(T))
37
+
38
+ global COEFPLOT_VERTICAL_SETTINGS ///
39
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) ///
40
+ yline(0, lwidth(thin) lcolor(black)) ///
41
+ bgcolor(white) graphregion(color(white)) ///
42
+ legend(region(lcolor(white))) ///
43
+ xsize(6.5) ysize(4.5) ///
44
+ ytitle("Treatment effect (minutes/day)" " ")
45
+
46
+ global COLOR_MAROON ///
47
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
48
+
49
+ global COLOR_GRAY ///
50
+ mcolor(gray) ciopts(recast(rcap) lcolor(gray))
51
+
52
+ global COLOR_BLACK ///
53
+ mcolor(black) ciopts(recast(rcap) lcolor(black))
54
+
55
+ global COLOR_NAVY ///
56
+ mcolor(navy) ciopts(recast(rcap) lcolor(navy))
57
+ end
58
+
59
+ **********************
60
+ * Analysis functions *
61
+ **********************
62
+
63
+ program main
64
+ define_constants
65
+ define_plot_settings
66
+ import_data
67
+
68
+ plot_naivete_all
69
+ plot_naivete_all, sixty
70
+ plot_naivete_all, hundred
71
+
72
+ reg_bonus
73
+ reg_bonus_S2
74
+ reg_bonus_new
75
+ end
76
+
77
+ program import_data
78
+ use "input/final_data_sample.dta", clear
79
+ end
80
+
81
+ program plot_naivete_all
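+ * Plots actual vs. predicted FITSBY usage by bonus x limit cell for periods 2-5;
+ * the sixty/hundred options select the winsorization level (W60/W100)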
82
+ syntax, [sixty hundred]
83
+
84
+ local suffix ""
85
+ local winsorization "W0"
86
+ if ("`sixty'" == "sixty"){
87
+ local suffix "_W"
88
+ local winsorization "W60"
89
+ }
90
+ if ("`hundred'" == "hundred"){
91
+ local suffix "_W100"
92
+ local winsorization "W100"
93
+ }
94
+
95
+ * Preserve data
96
+ preserve
97
+
98
+ * Reshape data
99
+ rename PD_*_UsageFITSBY UsageActual_*
100
+
101
+ rename S2_PredictUseNext_1`suffix' UsagePredicted0_P2
102
+
103
+ rename S3_PredictUseNext_1`suffix' UsagePredicted0_P3
104
+ rename S2_PredictUseNext_2`suffix' UsagePredicted1_P3
105
+
106
+ rename S4_PredictUseNext_1`suffix' UsagePredicted0_P4
107
+ rename S3_PredictUseNext_2`suffix' UsagePredicted1_P4
108
+ rename S2_PredictUseNext_3`suffix' UsagePredicted2_P4
109
+
110
+ rename S4_PredictUseNext_2`suffix' UsagePredicted1_P5
111
+ rename S3_PredictUseNext_3`suffix' UsagePredicted2_P5
112
+
113
+ keep UserID S3_Bonus S2_LimitType UsagePredicted* UsageActual*
114
+ keep UserID S3_Bonus S2_LimitType *_P2 *_P3 *_P4 *_P5
115
+ reshape long Usage, i(UserID S3_Bonus S2_LimitType) j(j) string
116
+
117
+ split j, p(_)
118
+ rename (j1 j2) (measure time)
119
+
120
+ * Recode data
121
+ encode time, generate(time_encode)
122
+ encode measure, generate(measure_encode)
123
+
124
+ recode time_encode ///
125
+ (1 = 1 "Period 2") ///
126
+ (2 = 2 "Period 3") ///
127
+ (3 = 3 "Period 4") ///
128
+ (4 = 4 "Period 5"), ///
129
+ gen(time_recode)
130
+
131
+ recode measure_encode ///
132
+ (1 = 1 "Actual") ///
133
+ (2 = 2 "Survey t prediction") ///
134
+ (3 = 3 "Survey t-1 prediction") ///
135
+ (4 = 4 "Survey t-2 prediction"), ///
136
+ gen(measure_recode)
137
+
138
+ * Plot data
139
+ cispike Usage if S3_Bonus == 0 & S2_LimitType == 0, ///
140
+ over1(measure_recode) over2(time_recode) ///
141
+ $CISPIKE_SETTINGS ///
142
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
143
+ ytitle("Usage (minutes/day)" " "))
144
+
145
+ graph export "output/cispike_naivete_BcontrolxLcontrol_`winsorization'.pdf", replace
146
+
147
+ cispike Usage if S3_Bonus == 1 & S2_LimitType == 0, ///
148
+ over1(measure_recode) over2(time_recode) ///
149
+ $CISPIKE_SETTINGS ///
150
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
151
+ ytitle("Usage (minutes/day)" " "))
152
+
153
+ graph export "output/cispike_naivete_BtreatxLcontrol_`winsorization'.pdf", replace
154
+
155
+ cispike Usage if S3_Bonus == 0 & S2_LimitType > 0, ///
156
+ over1(measure_recode) over2(time_recode) ///
157
+ $CISPIKE_SETTINGS ///
158
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
159
+ ytitle("Usage (minutes/day)" " "))
160
+
161
+ graph export "output/cispike_naivete_BcontrolxLtreat_`winsorization'.pdf", replace
162
+
163
+ cispike Usage if S3_Bonus == 1 & S2_LimitType > 0, ///
164
+ over1(measure_recode) over2(time_recode) ///
165
+ $CISPIKE_SETTINGS ///
166
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
167
+ ytitle("Usage (minutes/day)" " "))
168
+
169
+ graph export "output/cispike_naivete_BtreatxLtreat_`winsorization'.pdf", replace
170
+
171
+ * Restore data
172
+ restore
173
+ end
174
+
175
+ program reg_bonus
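+ * Compares the bonus effect on actual usage (periods 3-5) with Survey 3
+ * predictions and the Survey 2 MPL-based prediction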
176
+ est clear
177
+
178
+ preserve
179
+
180
+ gen S1_Usage_FITSBY = PD_P1_UsageFITSBY
181
+ gen S3_Usage_FITSBY = PD_P3_UsageFITSBY
182
+ gen S4_Usage_FITSBY = PD_P4_UsageFITSBY
183
+ gen S5_Usage_FITSBY = PD_P5_UsageFITSBY
184
+
185
+ gen S2_Predict_FITSBY = S2_PredictUseNext_1_W
186
+ gen S3_Predict_FITSBY = S3_PredictUseNext_1_W
187
+ gen S4_Predict_FITSBY = S3_PredictUseNext_2_W
188
+ gen S5_Predict_FITSBY = S3_PredictUseNext_3_W
189
+
190
+ * Run regressions
191
+ foreach survey in S3 S4 S5 {
192
+ local yvar `survey'_Usage_FITSBY
193
+ local baseline S1_Usage_FITSBY
194
+
195
+ gen_treatment, suffix(_`survey') simple
196
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
197
+ est store `yvar'
198
+ }
199
+
200
+ * Run regressions
201
+ foreach survey in S3 S4 S5 {
202
+ local yvar `survey'_Predict_FITSBY
203
+ local baseline S1_Usage_FITSBY
204
+
205
+ gen_treatment, suffix(_`survey') simple
206
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
207
+ est store `yvar'
208
+ }
209
+
210
+ gen S2_reduction = S2_PredictUseInitial_W * - (S2_PredictUseBonus / 100)
211
+
212
+ cap drop B_S3
213
+ gen B_S3 = 1
214
+ reg S2_reduction B_S3, noconstant
215
+ est store S2_reduction
216
+
217
+ * Plot regressions (by period)
218
+ coefplot (*Usage*, label("Actual") $COLOR_MAROON msymbol(O)) ///
219
+ (S2_reduction, label("Survey 2 MPL prediction") $COLOR_NAVY msymbol(S)) ///
220
+ (*Predict*, label("Survey 3 prediction") $COLOR_GRAY msymbol(D)), ///
221
+ keep(B_*) ///
222
+ vertical ///
223
+ $COEFPLOT_VERTICAL_SETTINGS ///
224
+ xlabel(1 "Period 3" 2 "Period 4" 3 "Period 5", ///
225
+ valuelabel angle(0))
226
+
227
+ graph export "output/coef_belief_bonus_effect.pdf", replace
228
+
229
+ restore
230
+ end
231
+
232
+ program reg_bonus_new
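+ * Variant of reg_bonus adding period 2 and a hard-coded benchmark implying
+ * alpha = 0 (see the TODO below)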
233
+ est clear
234
+
235
+ preserve
236
+
237
+ gen S1_Usage_FITSBY = PD_P1_UsageFITSBY
238
+ gen S2_Usage_FITSBY = PD_P2_UsageFITSBY
239
+ gen S3_Usage_FITSBY = PD_P3_UsageFITSBY
240
+ gen S4_Usage_FITSBY = PD_P4_UsageFITSBY
241
+ gen S5_Usage_FITSBY = PD_P5_UsageFITSBY
242
+
243
+ gen S2_Predict_FITSBY = S2_PredictUseNext_1_W
244
+ gen S3_Predict_FITSBY = S3_PredictUseNext_1_W
245
+ gen S4_Predict_FITSBY = S3_PredictUseNext_2_W
246
+ gen S5_Predict_FITSBY = S3_PredictUseNext_3_W
247
+
248
+ * Run regressions
249
+ foreach survey in S2 S3 S4 S5 {
250
+ local yvar `survey'_Usage_FITSBY
251
+ local baseline S1_Usage_FITSBY
252
+
253
+ gen_treatment, suffix(_`survey') simple
254
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
255
+ est store `yvar'
256
+ }
257
+
258
+ * Run regressions
259
+ foreach survey in S3 S4 S5 {
260
+ local yvar `survey'_Predict_FITSBY
261
+ local baseline S1_Usage_FITSBY
262
+
263
+ gen_treatment, suffix(_`survey') simple
264
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
265
+ est store `yvar'
266
+ }
267
+
268
+ gen S2_reduction = S2_PredictUseInitial_W * - (S2_PredictUseBonus / 100)
269
+
270
+ cap drop B_S3
271
+ gen B_S3 = 1
272
+ reg S2_reduction B_S3, noconstant
273
+ est store S2_reduction
274
+
275
+ matrix C = J(3,1,.)
276
+ matrix rownames C = mean ll ul
277
+ matrix colnames C = B_S2
278
+
279
+ * TODO: make this reproducible
280
+ matrix C[1,1] = (-16.11756 \ -19.64522 \ -12.62825)
281
+ matrix list C
282
+ coefplot matrix(C), ci((2 3))
283
+
284
+ * Plot regressions (by period)
285
+ coefplot (matrix(C), ci((2 3)) label("Makes {&alpha} = 0") $COLOR_BLACK) ///
286
+ (S2_reduction, label("Survey 2 MPL prediction") $COLOR_NAVY) ///
287
+ (*Usage*, label("Actual") $COLOR_MAROON) ///
288
+ (*Predict*, label("Survey 3 prediction") $COLOR_GRAY), ///
289
+ keep(B_*) ///
290
+ vertical ///
291
+ $COEFPLOT_VERTICAL_SETTINGS ///
292
+ xlabel(1 "Period 2" 2 "Period 3" 3 "Period 4" 4 "Period 5", ///
293
+ valuelabel angle(0))
294
+
295
+ graph export "output/coef_belief_bonus_effect_new.pdf", replace
296
+
297
+ restore
298
+ end
299
+
300
+ program reg_bonus_S2
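+ * Survey 2 version: compares actual usage with Survey 2 predictions for
+ * periods 2 and 3 only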
301
+
302
+ est clear
303
+
304
+ preserve
305
+
306
+ gen S1_Usage_FITSBY = PD_P1_UsageFITSBY
307
+ gen S2_Usage_FITSBY = PD_P2_UsageFITSBY
308
+ gen S3_Usage_FITSBY = PD_P3_UsageFITSBY
309
+
310
+ gen S2_Predict_FITSBY = S2_PredictUseNext_1_W
311
+ gen S3_Predict_FITSBY = S2_PredictUseNext_2_W
312
+
313
+ * Run regressions
314
+ foreach survey in S2 S3 {
315
+ local yvar `survey'_Usage_FITSBY
316
+ local baseline S1_Usage_FITSBY
317
+
318
+ gen_treatment, suffix(_`survey') simple
319
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
320
+ est store `yvar'
321
+ }
322
+
323
+ * Run regressions
324
+ foreach survey in S2 S3 {
325
+ local yvar `survey'_Predict_FITSBY
326
+ local baseline S1_Usage_FITSBY
327
+
328
+ gen_treatment, suffix(_`survey') simple
329
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`survey') simple
330
+ est store `yvar'
331
+ }
332
+
333
+ gen S2_reduction = S2_PredictUseInitial_W * - (S2_PredictUseBonus / 100)
334
+
335
+ cap drop B_S2
336
+ gen B_S2 = 1
337
+ reg S2_reduction B_S2, noconstant
338
+ est store S2_reduction
339
+
340
+
341
+ * Plot regressions (by period)
342
+ coefplot (*Usage*, label("Actual") $COLOR_MAROON) ///
343
+ (*Predict*, label("Predicted") $COLOR_GRAY) ///
344
+ (S2_reduction, label("Bonus Predicted") $COLOR_NAVY), ///
345
+ keep(B_*) ///
346
+ vertical ///
347
+ $COEFPLOT_VERTICAL_SETTINGS ///
348
+ xlabel(1 "Period 2" 2 "Period 3", ///
349
+ valuelabel angle(0))
350
+
351
+ graph export "output/coef_belief_bonus_survey2.pdf", replace
352
+
353
+ restore
354
+ end
355
+ ***********
356
+ * Execute *
357
+ ***********
358
+
359
+ main
17/replication_package/code/analysis/treatment_effects/code/CommitmentResponse.do ADDED
@@ -0,0 +1,1404 @@
1
+ // Response to commitment, moderated by demand for flexibility
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ yaml global STRATA = YAML.metadata.strata
18
+ end
19
+
20
+ program define_plot_settings
21
+ global CISPIKE_SETTINGS ///
22
+ spikecolor(maroon black gray) ///
23
+ cicolor(maroon black gray)
24
+
25
+ global CISPIKE_DOUBLE_SETTINGS ///
26
+ spike(yaxis(1) || yaxis(2)) ///
27
+ ci(yaxis(1) || yaxis(2)) ///
28
+ spikecolor(maroon gray) ///
29
+ cicolor(maroon gray)
30
+
31
+ global CISPIKE_VERTICAL_GRAPHOPTS ///
32
+ ylabel(#6) ///
33
+ xsize(6.5) ysize(4.5) ///
34
+ legend(cols(4))
35
+
36
+ global COLOR_MAROON ///
37
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
38
+
39
+ global COLOR_MAROON_LIGHT ///
40
+ mcolor(maroon*0.9) ciopts(recast(rcap) lcolor(maroon*0.9))
41
+
42
+ global COLOR_MAROON_DARK ///
43
+ mcolor(maroon*1.1) ciopts(recast(rcap) lcolor(maroon*1.1))
44
+
45
+
46
+ global COLOR_GRAY_LIGHT ///
47
+ mcolor(gray*0.9) ciopts(recast(rcap) lcolor(gray*0.9))
48
+
49
+ global COLOR_GRAY_DARK ///
50
+ mcolor(gray*1.1) ciopts(recast(rcap) lcolor(gray*1.1))
51
+
52
+
53
+ global COLOR_BLUE ///
54
+ mcolor(edkblue) ciopts(recast(rcap) lcolor(edkblue))
55
+
56
+ global COLOR_BLACK ///
57
+ mcolor(black) ciopts(recast(rcap) lcolor(black))
58
+
59
+ global COLOR_GRAY ///
60
+ mcolor(gray) ciopts(recast(rcap) lcolor(gray))
61
+
62
+ global COLOR_NAVY ///
63
+ mcolor(navy) ciopts(recast(rcap) lcolor(navy))
64
+
65
+ global COLOR_NAVY_LIGHT ///
66
+ mcolor(navy*0.5) ciopts(recast(rcap) lcolor(navy*0.5))
67
+
68
+ global COEFPLOT_SETTINGS_MINUTES ///
69
+ vertical ///
70
+ yline(0, lwidth(thin) lcolor(black)) ///
71
+ bgcolor(white) graphregion(color(white)) ///
72
+ legend(cols(4) region(lcolor(white))) ///
73
+ xsize(6.5) ysize(4.5) ///
74
+ ytitle("Treatment effect (minutes/day)" " ")
75
+
76
+ global COEFPLOT_SETTINGS_THIN ///
77
+ vertical ///
78
+ yline(0, lwidth(thin) lcolor(black)) ///
79
+ bgcolor(white) graphregion(color(white)) ///
80
+ legend(cols(4) region(lcolor(white))) ///
81
+ xsize(4.5) ysize(4.5) ///
82
+ ytitle("Treatment effect (minutes/day)" " ")
83
+
84
+ global COEFPLOT_SETTINGS_STD ///
85
+ xline(0, lwidth(thin) lcolor(black)) ///
86
+ bgcolor(white) graphregion(color(white)) grid(w) ///
87
+ legend(rows(1) region(lcolor(white))) ///
88
+ xsize(6.5) ysize(4.5) ///
89
+ xtitle(" " "Treatment effect (standard deviations per hour/day of use)")
90
+
91
+ global COEFPLOT_SETTINGS_ITT ///
92
+ xline(0, lwidth(thin) lcolor(black)) ///
93
+ bgcolor(white) graphregion(color(white)) grid(w) ///
94
+ legend(rows(1) region(lcolor(white))) ///
95
+ xsize(6.5) ysize(4.5) ///
96
+ xtitle(" " "Treatment effect (standard deviations)")
97
+
98
+ global COEFPLOT_LABELS_LIMIT ///
99
+ coeflabels(L_1 = `"Snooze 0"' ///
100
+ L_2 = `"Snooze 2"' ///
101
+ L_3 = `"Snooze 5"' ///
102
+ L_4 = `"Snooze 20"' ///
103
+ L_5 = `"No snooze"' ///
104
+ L = `"Limit"' ///
105
+ B = `"Bonus"')
106
+
107
+ global COEFPLOT_STACKED_GRAPHOPTS ///
108
+ ycommon row(2) ///
109
+ graphregion(color(white)) ///
110
+ xsize(6.5) ysize(8.5)
111
+
112
+ global COEFPLOT_ADDICTION_SETTINGS ///
113
+ xline(0, lwidth(thin) lcolor(black)) ///
114
+ bgcolor(white) graphregion(color(white)) grid(w) ///
115
+ legend(rows(1) region(lcolor(white))) ///
116
+ xsize(7) ysize(6.5)
117
+
118
+ global ADDICTION_LABELS ///
119
+ xlabel(, labsize(small)) ///
120
+ xtitle(, size(small)) ///
121
+ ylabel(, labsize(vsmall)) ///
122
+ ytitle(, size(small)) ///
123
+ legend(size(small))
124
+ end
125
+
126
+ **********************
127
+ * Analysis functions *
128
+ **********************
129
+
130
+ program main
131
+ define_constants
132
+ define_plot_settings
133
+ import_data
134
+
135
+ reg_usage
136
+ reg_usage, fitsby
137
+ reg_usage_simple
138
+ reg_usage_simple, fitsby
139
+ reg_usage_simple_balanced
140
+ reg_usage_simple_balanced, fitsby
141
+ plot_snooze
142
+ plot_snooze, fitsby
143
+ plot_snooze, minutes
144
+ plot_snooze, fitsby minutes
145
+ plot_snooze_by_limit
146
+ plot_snooze_by_limit, fitsby
147
+ plot_snooze_by_limit, minutes
148
+ plot_snooze_by_limit, fitsby minutes
149
+ plot_snooze_both
150
+ plot_snooze_both, fitsby
151
+ plot_snooze_both_by_limit
152
+ plot_snooze_both_by_limit, fitsby
153
+ plot_phone_use_change
154
+ plot_phone_use_change_simple
155
+ reg_usage_interaction
156
+ reg_usage_interaction, fitsby
157
+ reg_self_control
158
+ reg_self_control_null
159
+ reg_iv_self_control
160
+ reg_usage_simple_weekly
161
+ reg_usage_simple_weekly, fitsby
162
+ reg_usage_simple_daily_p12
163
+ reg_usage_simple_daily_p12, fitsby
164
+ reg_addiction_simple
165
+ reg_sms_addiction_simple
166
+ reg_swb_simple
167
+ reg_swb_icw_simple
168
+ reg_sms_addiction_simple_weekly
169
+ reg_substitution
170
+ end
171
+
172
+ program import_data
173
+ use "input/final_data_sample.dta", clear
174
+ end
175
+
176
+ program reg_usage
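+ * Treatment effects on usage by limit arm (Snooze 0/2/5/20/No snooze),
+ * per period and pooled; the fitsby option restricts to FITSBY apps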
177
+ syntax, [fitsby]
178
+
179
+ est clear
180
+
181
+ * Determine FITSBY restriction
182
+ if ("`fitsby'" == "fitsby") {
183
+ local fitsby "FITSBY"
184
+ local suffix "_fitsby"
185
+ }
186
+ else {
187
+ local fitsby ""
188
+ local suffix ""
189
+ }
190
+
191
+ * Run regressions
192
+ foreach yvar in PD_P2_Usage`fitsby' ///
193
+ PD_P3_Usage`fitsby' ///
194
+ PD_P4_Usage`fitsby' ///
195
+ PD_P5_Usage`fitsby' ///
196
+ PD_P432_Usage`fitsby' ///
197
+ PD_P5432_Usage`fitsby' {
198
+ local baseline PD_P1_Usage`fitsby'
199
+
200
+ gen_treatment
201
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline')
202
+ est store `yvar'
203
+ }
204
+
205
+ * Plot regressions (by period)
206
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_MAROON) ///
207
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_BLACK) ///
208
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_NAVY) ///
209
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_GRAY) , ///
210
+ keep(L_*) order(L_1 L_2 L_3 L_4 L_5) ///
211
+ $COEFPLOT_SETTINGS_MINUTES ///
212
+ $COEFPLOT_LABELS_LIMIT
213
+
214
+ graph export "output/coef_usage`suffix'.pdf", replace
215
+
216
+ * Plot regressions (all period)
217
+ coefplot (PD_P5432_Usage`fitsby', label("Period 2 to 5") $COLOR_MAROON), ///
218
+ keep(L_*) order(L_1 L_2 L_3 L_4 L_5) ///
219
+ $COEFPLOT_SETTINGS_MINUTES ///
220
+ $COEFPLOT_LABELS_LIMIT ///
221
+ legend(off)
222
+
223
+ graph export "output/coef_usage_combined`suffix'.pdf", replace
224
+ end
225
+
226
+ program reg_usage_simple
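+ * Pooled bonus (B) and limit (L) effects on usage, per period and for
+ * periods 2-5 combined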
227
+ syntax, [fitsby]
228
+
229
+ est clear
230
+
231
+ * Determine FITSBY restriction
232
+ if ("`fitsby'" == "fitsby") {
233
+ local fitsby "FITSBY"
234
+ local suffix "_fitsby"
235
+ }
236
+ else {
237
+ local fitsby ""
238
+ local suffix ""
239
+ }
240
+
241
+ * Run regressions
242
+ foreach yvar in PD_P2_Usage`fitsby' ///
243
+ PD_P3_Usage`fitsby' ///
244
+ PD_P4_Usage`fitsby' ///
245
+ PD_P5_Usage`fitsby' ///
246
+ PD_P432_Usage`fitsby' ///
247
+ PD_P5432_Usage`fitsby' {
248
+ local baseline PD_P1_Usage`fitsby'
249
+
250
+ gen_treatment, simple
251
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
252
+ est store `yvar'
253
+ }
254
+
255
+ * Plot regressions (by period)
256
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_MAROON msymbol(O)) ///
257
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_BLACK msymbol(S)) ///
258
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_NAVY msymbol(D)) ///
259
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_GRAY msymbol(T)), ///
260
+ keep(B L) order(B L) ///
261
+ $COEFPLOT_SETTINGS_MINUTES ///
262
+ $COEFPLOT_LABELS_LIMIT
263
+
264
+ graph export "output/coef_usage_simple`suffix'.pdf", replace
265
+
266
+ * Plot regressions (by period)
267
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_MAROON msymbol(O)) ///
268
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_MAROON msymbol(S)) ///
269
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_MAROON msymbol(D)) ///
270
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_MAROON msymbol(T)), ///
271
+ keep(B) order(B) ///
272
+ $COEFPLOT_SETTINGS_THIN ///
273
+ $COEFPLOT_LABELS_LIMIT
274
+
275
+ graph export "output/coef_usage_simple`suffix'_bonus_only.pdf", replace
276
+
277
+ * Plot regressions (by period)
278
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_GRAY msymbol(O)) ///
279
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_GRAY msymbol(S)) ///
280
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_GRAY msymbol(D)) ///
281
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_GRAY msymbol(T)), ///
282
+ keep(L) order(L) ///
283
+ ysc(r(-60 0)) ///
284
+ ylabel(-60(20)0) ///
285
+ $COEFPLOT_SETTINGS_THIN ///
286
+ $COEFPLOT_LABELS_LIMIT
287
+
288
+ graph export "output/coef_usage_simple`suffix'_limit_only.pdf", replace
289
+
290
+
291
+ * Plot regressions (all period)
292
+ coefplot (PD_P5432_Usage`fitsby', label("Period 2 to 5") $COLOR_MAROON), ///
293
+ keep(B L) order(B L) ///
294
+ $COEFPLOT_SETTINGS_MINUTES ///
295
+ $COEFPLOT_LABELS_LIMIT ///
296
+ legend(off)
297
+
298
+ graph export "output/coef_usage_combined_simple`suffix'.pdf", replace
299
+ end
300
+
301
+ program reg_usage_simple_balanced
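+ * Re-estimates the simple usage effects with entropy-balancing weights
+ * toward the hard-coded demographic targets below (income, college, male, white, age)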
302
+ syntax, [fitsby]
303
+
304
+ est clear
305
+
306
+ preserve
307
+
308
+ local income 43.01
309
+ local college 0.3009
310
+ local male 0.4867
311
+ local white 0.73581
312
+ local age 47.6
313
+
314
+ ebalance balance_income balance_college balance_male balance_white balance_age, ///
315
+ manualtargets(`income' `college' `male' `white' `age') ///
316
+ generate(weight)
317
+
318
+ * Determine FITSBY restriction
319
+ if ("`fitsby'" == "fitsby") {
320
+ local fitsby "FITSBY"
321
+ local suffix "_fitsby"
322
+ }
323
+ else {
324
+ local fitsby ""
325
+ local suffix ""
326
+ }
327
+
328
+ * Run regressions
329
+ foreach yvar in PD_P2_Usage`fitsby' ///
330
+ PD_P3_Usage`fitsby' ///
331
+ PD_P4_Usage`fitsby' ///
332
+ PD_P5_Usage`fitsby' ///
333
+ PD_P432_Usage`fitsby' ///
334
+ PD_P5432_Usage`fitsby' {
335
+ local baseline PD_P1_Usage`fitsby'
336
+
337
+ gen_treatment, simple
338
+
339
+ reg `yvar' B L $STRATA `baseline' [w=weight], robust
340
+ est store `yvar'
341
+ }
342
+
343
+ * Plot regressions (by period)
344
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_MAROON) ///
345
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_BLACK) ///
346
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_NAVY) ///
347
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_GRAY), ///
348
+ keep(B L) order(B L) ///
349
+ $COEFPLOT_SETTINGS_MINUTES ///
350
+ $COEFPLOT_LABELS_LIMIT
351
+
352
+ graph export "output/coef_usage_simple_balanced`suffix'.pdf", replace
353
+
354
+ restore
355
+ end
356
+ program reg_substitution
357
+ est clear
358
+
359
+ gen_treatment, simple
360
+ reg_treatment, yvar(S4_Substitution) indep($STRATA) simple
361
+ est store S4_Substitution
362
+
363
+ * Plot regressions (all period)
364
+ coefplot (S4_Substitution, $COLOR_MAROON), ///
365
+ keep(B L) order(B L) ///
366
+ $COEFPLOT_SETTINGS_MINUTES ///
367
+ $COEFPLOT_LABELS_LIMIT ///
368
+ legend(off)
369
+
370
+ graph export "output/coef_self_reported_substitution.pdf", replace
371
+
372
+ gen_treatment, simple
373
+ reg_treatment, yvar(S4_Substitution_W) indep($STRATA) simple
374
+ est store S4_Substitution_W
375
+
376
+ * Plot regressions (all period)
377
+ coefplot (S4_Substitution_W, $COLOR_MAROON), ///
378
+ keep(B L) order(B L) ///
379
+ $COEFPLOT_SETTINGS_MINUTES ///
380
+ $COEFPLOT_LABELS_LIMIT ///
381
+ legend(off)
382
+
383
+ graph export "output/coef_self_reported_substitution_w.pdf", replace
384
+ end
385
+
386
+
387
+ program reg_usage_simple_weekly
388
+ syntax, [fitsby]
389
+
390
+ est clear
391
+
392
+ * Determine FITSBY restriction
393
+ if ("`fitsby'" == "fitsby") {
394
+ local fitsby "FITSBY"
395
+ local suffix "_fitsby"
396
+ }
397
+ else {
398
+ local fitsby ""
399
+ local suffix ""
400
+ }
401
+
402
+ * Run regressions
403
+ foreach yvar in PD_WeeklyUsage`fitsby'_4 ///
404
+ PD_WeeklyUsage`fitsby'_5 ///
405
+ PD_WeeklyUsage`fitsby'_6 ///
406
+ PD_WeeklyUsage`fitsby'_7 ///
407
+ PD_WeeklyUsage`fitsby'_8 ///
408
+ PD_WeeklyUsage`fitsby'_9 ///
409
+ PD_WeeklyUsage`fitsby'_10 ///
410
+ PD_WeeklyUsage`fitsby'_11 ///
411
+ PD_WeeklyUsage`fitsby'_12 ///
412
+ PD_WeeklyUsage`fitsby'_13 ///
413
+ PD_WeeklyUsage`fitsby'_14 ///
414
+ PD_WeeklyUsage`fitsby'_15 {
415
+ local baseline PD_WeeklyUsage`fitsby'_3
416
+
417
+ gen_treatment, simple
418
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
419
+ est store `yvar'
420
+ }
421
+
422
+ * Plot regressions (by period)
423
+ coefplot (PD_WeeklyUsage`fitsby'_4 , label("Week 4") $COLOR_MAROON msymbol(O)) ///
424
+ (PD_WeeklyUsage`fitsby'_5 , label("Week 5") $COLOR_BLACK msymbol(S)) ///
425
+ (PD_WeeklyUsage`fitsby'_6 , label("Week 6") $COLOR_GRAY msymbol(D)) ///
426
+ (PD_WeeklyUsage`fitsby'_7 , label("Week 7") $COLOR_MAROON msymbol(O)) ///
427
+ (PD_WeeklyUsage`fitsby'_8 , label("Week 8") $COLOR_BLACK msymbol(S)) ///
428
+ (PD_WeeklyUsage`fitsby'_9 , label("Week 9") $COLOR_GRAY msymbol(D)) ///
429
+ (PD_WeeklyUsage`fitsby'_10, label("Week 10") $COLOR_MAROON msymbol(O)) ///
430
+ (PD_WeeklyUsage`fitsby'_11, label("Week 11") $COLOR_BLACK msymbol(S)) ///
431
+ (PD_WeeklyUsage`fitsby'_12, label("Week 12") $COLOR_GRAY msymbol(D)) ///
432
+ (PD_WeeklyUsage`fitsby'_13, label("Week 13") $COLOR_MAROON msymbol(O)) ///
433
+ (PD_WeeklyUsage`fitsby'_14, label("Week 14") $COLOR_BLACK msymbol(S)) ///
434
+ (PD_WeeklyUsage`fitsby'_15, label("Week 15") $COLOR_GRAY msymbol(D)), ///
435
+ keep(B L) order(B L) ///
436
+ $COEFPLOT_SETTINGS_MINUTES ///
437
+ $COEFPLOT_LABELS_LIMIT
438
+
439
+ graph export "output/coef_usage_simple_weekly`suffix'.pdf", replace
440
+ end
441
+
442
+ program reg_usage_simple_daily_p12
443
+ syntax, [fitsby]
444
+
445
+ est clear
446
+
447
+ * Determine FITSBY restriction
448
+ if ("`fitsby'" == "fitsby") {
449
+ local fitsby "FITSBY"
450
+ local suffix "_fitsby"
451
+ }
452
+ else {
453
+ local fitsby ""
454
+ local suffix ""
455
+ }
456
+
457
+ * Run regressions
458
+ foreach day of numlist 1/42 {
459
+ local yvar PD_DailyUsage`fitsby'_`day'
460
+
461
+ gen_treatment, suffix(`day') simple
462
+ reg_treatment, yvar(`yvar') indep($STRATA) suffix(`day') simple
463
+ est store `yvar'
464
+ }
465
+
466
+ * Plot regressions (by period)
467
+ coefplot (PD_DailyUsage`fitsby'_1, label("Day 1") $COLOR_NAVY) ///
468
+ (PD_DailyUsage`fitsby'_2, label("Day 2") $COLOR_NAVY) ///
469
+ (PD_DailyUsage`fitsby'_3, label("Day 3") $COLOR_NAVY ) ///
470
+ (PD_DailyUsage`fitsby'_4, label("Day 4") $COLOR_NAVY) ///
471
+ (PD_DailyUsage`fitsby'_5, label("Day 5") $COLOR_NAVY) ///
472
+ (PD_DailyUsage`fitsby'_6, label("Day 6") $COLOR_NAVY) ///
473
+ (PD_DailyUsage`fitsby'_7, label("Day 7") $COLOR_NAVY ) ///
474
+ (PD_DailyUsage`fitsby'_8, label("Day 8") $COLOR_NAVY) ///
475
+ (PD_DailyUsage`fitsby'_9, label("Day 9") $COLOR_NAVY) ///
476
+ (PD_DailyUsage`fitsby'_10, label("Day 10") $COLOR_NAVY ) ///
477
+ (PD_DailyUsage`fitsby'_11, label("Day 11") $COLOR_NAVY ) ///
478
+ (PD_DailyUsage`fitsby'_12, label("Day 12") $COLOR_NAVY ) ///
479
+ (PD_DailyUsage`fitsby'_13, label("Day 13") $COLOR_NAVY) ///
480
+ (PD_DailyUsage`fitsby'_14, label("Day 14") $COLOR_NAVY ) ///
481
+ (PD_DailyUsage`fitsby'_15, label("Day 15") $COLOR_NAVY ) ///
482
+ (PD_DailyUsage`fitsby'_16, label("Day 16") $COLOR_NAVY ) ///
483
+ (PD_DailyUsage`fitsby'_17, label("Day 17") $COLOR_NAVY) ///
484
+ (PD_DailyUsage`fitsby'_18, label("Day 18") $COLOR_NAVY ) ///
485
+ (PD_DailyUsage`fitsby'_19, label("Day 19") $COLOR_NAVY ) ///
486
+ (PD_DailyUsage`fitsby'_20, label("Day 20") $COLOR_NAVY ) ///
487
+ (PD_DailyUsage`fitsby'_21, label("Day 21") $COLOR_NAVY) ///
488
+ (PD_DailyUsage`fitsby'_22, label("Day 22") $COLOR_NAVY ) ///
489
+ (PD_DailyUsage`fitsby'_23, label("Day 23") $COLOR_NAVY ) ///
490
+ (PD_DailyUsage`fitsby'_24, label("Day 24") $COLOR_NAVY ) ///
491
+ (PD_DailyUsage`fitsby'_25, label("Day 25") $COLOR_NAVY) ///
492
+ (PD_DailyUsage`fitsby'_26, label("Day 26") $COLOR_NAVY ) ///
493
+ (PD_DailyUsage`fitsby'_27, label("Day 27") $COLOR_NAVY ) ///
494
+ (PD_DailyUsage`fitsby'_28, label("Day 28") $COLOR_NAVY ) ///
495
+ (PD_DailyUsage`fitsby'_29, label("Day 29") $COLOR_NAVY) ///
496
+ (PD_DailyUsage`fitsby'_30, label("Day 30") $COLOR_NAVY ) ///
497
+ (PD_DailyUsage`fitsby'_31, label("Day 31") $COLOR_NAVY ) ///
498
+ (PD_DailyUsage`fitsby'_32, label("Day 32") $COLOR_NAVY) ///
499
+ (PD_DailyUsage`fitsby'_33, label("Day 33") $COLOR_NAVY) ///
500
+ (PD_DailyUsage`fitsby'_34, label("Day 34") $COLOR_NAVY ) ///
501
+ (PD_DailyUsage`fitsby'_35, label("Day 35") $COLOR_NAVY ) ///
502
+ (PD_DailyUsage`fitsby'_36, label("Day 36") $COLOR_NAVY ) ///
503
+ (PD_DailyUsage`fitsby'_37, label("Day 37") $COLOR_NAVY) ///
504
+ (PD_DailyUsage`fitsby'_38, label("Day 38") $COLOR_NAVY ) ///
505
+ (PD_DailyUsage`fitsby'_39, label("Day 39") $COLOR_NAVY ) ///
506
+ (PD_DailyUsage`fitsby'_40, label("Day 40") $COLOR_NAVY ) ///
507
+ (PD_DailyUsage`fitsby'_41, label("Day 41") $COLOR_NAVY) ///
508
+ (PD_DailyUsage`fitsby'_42, label("Day 42") $COLOR_NAVY), ///
509
+ keep(B*) xline(22) ///
510
+ $COEFPLOT_SETTINGS_MINUTES ///
511
+ $COEFPLOT_LABELS_LIMIT legend(off) ///
512
+ xlabel(10 "Period 1" 22 "Survey 2" 34 "Period 2") ///
513
+
514
+
515
+ graph export "output/coef_usage_simple_daily_p12`suffix'.pdf", replace
516
+ end
517
+
518
+ program reg_sms_addiction_simple_weekly
519
+ syntax
520
+
521
+ est clear
522
+
523
+ * Run regressions
524
+ foreach week of numlist 4/9 {
525
+ local yvar Week`week'_SMSIndex
526
+ local comparison_week = `week' - 3
527
+ if (`comparison_week' > 3){
528
+ local comparison_week = `week' - 6
529
+ }
530
+
531
+ local baseline Week`comparison_week'_SMSIndex
532
+
533
+ gen_treatment, simple
534
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') simple
535
+ est store `yvar'
536
+ }
537
+
538
+ * Plot regressions (by period)
539
+ coefplot (Week4_SMSIndex , label("Week 4") $COLOR_MAROON) ///
540
+ (Week5_SMSIndex , label("Week 5") $COLOR_BLACK ) ///
541
+ (Week6_SMSIndex , label("Week 6") $COLOR_GRAY ) ///
542
+ (Week7_SMSIndex , label("Week 7") $COLOR_MAROON) ///
543
+ (Week8_SMSIndex , label("Week 8") $COLOR_BLACK ) ///
544
+ (Week9_SMSIndex , label("Week 9") $COLOR_GRAY ), ///
545
+ keep(B L) order(B L) ///
546
+ $COEFPLOT_SETTINGS_MINUTES ///
547
+ $COEFPLOT_LABELS_LIMIT
548
+
549
+ graph export "output/coef_sms_addiction_simple_weekly.pdf", replace
550
+ end
551
+
552
+ program reg_addiction_simple
553
+ syntax
554
+
555
+ est clear
556
+
557
+ * Run regressions for limit
558
+ foreach num of numlist 1/16 {
559
+ local baseline S1_Addiction_`num'
560
+
561
+ gen S43_Addiction_`num' = (S3_Addiction_`num' + S4_Addiction_`num') / 2
562
+ local yvar S43_Addiction_`num'
563
+
564
+ gen_treatment, suffix(_`yvar') simple
565
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
566
+ est store `yvar'
567
+ }
568
+
569
+ * Run regressions for bonus
570
+ foreach num of numlist 1/16 {
571
+ local baseline S1_Addiction_`num'
572
+
573
+ local yvar S4_Addiction_`num'
574
+
575
+ gen_treatment, suffix(_`yvar') simple
576
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
577
+ est store `yvar'
578
+ }
579
+
580
+ coefplot (S4_Addiction_*, keep(B_*) label("Bonus") mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) rename(B_S4_* = *)) ///
581
+ (S43_Addiction_*, keep(L_*) label("Limit") mcolor(gray) ciopts(recast(rcap) lcolor(gray)) rename(L_S43_* = *)), ///
582
+ $COEFPLOT_ADDICTION_SETTINGS ///
583
+ $ADDICTION_LABELS ///
584
+ yaxis(1) yscale(axis(1) range(0)) xlabel(-0.06(0.02)0.06, axis(1)) ///
585
+ ylabel(1 "Fear missing what happening online" 2 "Check social media/messages immediately after waking up" ///
586
+ 3 "Use longer than intended" 4 "Tell yourself just a few more minutes" ///
587
+ 5 "Use to distract from personal issues" 6 "Use to distract from anxiety/depression/etc." ///
588
+ 7 "Use to relax to go to sleep" 8 "Try and fail to reduce use" ///
589
+ 9 "Others are concerned about use" 10 "Feel anxious without phone" ///
590
+ 11 "Have difficulty putting down phone " 12 "Annoyed at interruption in use" ///
591
+ 13 "Use harms school/work performance" 14 "Lose sleep from use" ///
592
+ 15 "Prefer phone to human interaction" 16 "Procrastinate by using phone", ///
593
+ valuelabel angle(0)) horizontal ///
594
+ ytitle("") xtitle("Treatment effect", axis(1))
595
+
596
+ graph export "output/coef_addiction_simple.pdf", replace
597
+
598
+ end
599
+
600
+
601
+ program reg_sms_addiction_simple
602
+ est clear
603
+
604
+ preserve
605
+
606
+ * Run regressions for limit
607
+ foreach num of numlist 1/9 {
608
+ local baseline S1_AddictionText_`num'
609
+
610
+ gen S23_AddictionText_`num' = (S2_AddictionText_`num' + S3_AddictionText_`num') / 2
611
+ local yvar S23_AddictionText_`num'
612
+
613
+ gen_treatment, suffix(_`yvar') simple
614
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
615
+ est store `yvar'
616
+ }
617
+
618
+ * Run regressions for bonus
619
+ foreach num of numlist 1/9 {
620
+ local baseline S1_AddictionText_`num'
621
+
622
+ local yvar S3_AddictionText_`num'
623
+
624
+ gen_treatment, suffix(_`yvar') simple
625
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
626
+ est store `yvar'
627
+ }
628
+
629
+ coefplot (S3_AddictionText_*, keep(B_*) label("Bonus") mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) rename(B_S3_* = *)) ///
630
+ (S23_AddictionText_*, keep(L_*) label("Limit") mcolor(gray) ciopts(recast(rcap) lcolor(gray)) rename(L_S23_* = *)), ///
631
+ $COEFPLOT_ADDICTION_SETTINGS ///
632
+ $ADDICTION_LABELS ///
633
+ yaxis(1) yscale(axis(1) range(0)) xlabel(-0.2(0.05)0.2, axis(1)) ///
634
+ ylabel(1 "Use longer than intended" 2 "Use harms school/work performance" ///
635
+ 3 "Easy to control screen time x (-1)" 4 "Use mindlessly" ///
636
+ 5 "Use because felt down" 6 "Use kept from working on something needed" ///
637
+ 7 "Ideally used phone less" 8 "Lose sleep from use" ///
638
+ 9 "Check social media/messages immediately after waking up", ///
639
+ valuelabel angle(0)) horizontal ///
640
+ ytitle("") xtitle("Treatment effect", axis(1))
641
+
642
+ graph export "output/coef_sms_addiction_simple.pdf", replace
643
+
644
+ restore
645
+ end
646
+
647
+
648
+ program reg_swb_simple
649
+ est clear
650
+
651
+ preserve
652
+
653
+ gen S1_WellBeing_8 = (S1_WellBeing_1 + S1_WellBeing_2 + S1_WellBeing_3 + S1_WellBeing_4)/4
654
+ gen S1_WellBeing_9 = (S1_WellBeing_5 + S1_WellBeing_6 + S1_WellBeing_7)/3
655
+ gen S3_WellBeing_8 = (S3_WellBeing_1 + S3_WellBeing_2 + S3_WellBeing_3 + S3_WellBeing_4)/4
656
+ gen S3_WellBeing_9 = (S3_WellBeing_5 + S3_WellBeing_6 + S3_WellBeing_7)/3
657
+ gen S4_WellBeing_8 = (S4_WellBeing_1 + S4_WellBeing_2 + S4_WellBeing_3 + S4_WellBeing_4)/4
658
+ gen S4_WellBeing_9 = (S4_WellBeing_5 + S4_WellBeing_6 + S4_WellBeing_7)/3
659
+
660
+
661
+ * Run regressions for limit
662
+ foreach num of numlist 1/9 {
663
+ local baseline S1_WellBeing_`num'
664
+
665
+ gen S43_WellBeing_`num' = (S4_WellBeing_`num' + S3_WellBeing_`num') / 2
666
+ local yvar S43_WellBeing_`num'
667
+
668
+ gen_treatment, suffix(_`yvar') simple
669
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
670
+ est store `yvar'
671
+ }
672
+
673
+ * Run regressions for bonus
674
+ foreach num of numlist 1/9 {
675
+ local baseline S1_WellBeing_`num'
676
+
677
+ local yvar S4_WellBeing_`num'
678
+
679
+ gen_treatment, suffix(_`yvar') simple
680
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
681
+ est store `yvar'
682
+ }
683
+
684
+ coefplot (S4_WellBeing_*, keep(B_*) label("Bonus") mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) rename(B_S4_* = *)) ///
685
+ (S43_WellBeing_*, keep(L_*) label("Limit") mcolor(gray) ciopts(recast(rcap) lcolor(gray)) rename(L_S43_* = *)), ///
686
+ $COEFPLOT_ADDICTION_SETTINGS ///
687
+ $ADDICTION_LABELS ///
688
+ yaxis(1) yscale(axis(1) range(0)) xlabel(-0.09(0.03)0.09, axis(1)) ///
689
+ ylabel(1 "Was happy" 2 "Was satisfied with life" ///
690
+ 3 "Felt anxious x (-1)" 4 "Felt depressed x (-1)" ///
691
+ 5 "Could concentrate" 6 "Was easily distracted x (-1)" ///
692
+ 7 "Slept well" 8 "Happy <-> depressed index" ///
693
+ 9 "Concentrate <-> sleep index", ///
694
+ valuelabel angle(0)) horizontal ///
695
+ ytitle("") xtitle("Treatment effect", axis(1))
696
+
697
+ graph export "output/coef_swb_simple.pdf", replace
698
+
699
+ restore
700
+ end
701
+
702
+ program reg_swb_icw_simple
703
+ est clear
704
+
705
+ preserve
706
+
707
+ * Run regressions for limit
708
+ foreach num of numlist 1/7 {
709
+ local baseline S1_WellBeing_`num'
710
+
711
+ gen S43_WellBeing_`num' = (S4_WellBeing_`num' + S3_WellBeing_`num') / 2
712
+ local yvar S43_WellBeing_`num'
713
+
714
+ gen_treatment, suffix(_`yvar') simple
715
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
716
+ est store `yvar'
717
+ }
718
+
719
+ * Run regressions for bonus
720
+ foreach num of numlist 1/7 {
721
+ local baseline S1_WellBeing_`num'
722
+
723
+ local yvar S3_WellBeing_`num'
724
+
725
+ gen_treatment, suffix(_`yvar') simple
726
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
727
+ est store `yvar'
728
+ }
729
+
730
+ foreach idx in HSAD CDS {
731
+ local baseline S1_index_`idx'
732
+ gen S43_index_`idx' = (S3_index_`idx' + S4_index_`idx') / 2
733
+ local yvar S43_index_`idx'
734
+ gen_treatment, suffix(_`yvar') simple
735
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
736
+ est store `yvar'
737
+
738
+ local yvar S3_index_`idx'
739
+ gen_treatment, suffix(_`yvar') simple
740
+ reg_treatment, yvar(`yvar') indep($STRATA `baseline') suffix(_`yvar') simple
741
+ est store `yvar'
742
+ }
743
+
744
+ coefplot (S3_WellBeing_* S3_index_*, keep(B_*) label("Bonus") mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) rename(B_S3_* = *)) ///
745
+ (S43_WellBeing_* S43_index_*, keep(L_*) label("Limit") mcolor(gray) ciopts(recast(rcap) lcolor(gray)) rename(L_S43_* = *)), ///
746
+ $COEFPLOT_ADDICTION_SETTINGS ///
747
+ $ADDICTION_LABELS ///
748
+ yscale(axis(1) range(0)) xlabel(-0.09(0.03)0.09, axis(1)) ///
749
+ horizontal ///
750
+ xtitle("Treatment effect", axis(1)) ///
751
+ group(*index*="", nolabels) ///
752
+ ylabel(1 "Was happy" 2 "Was satisfied with life" ///
753
+ 3 "Felt anxious x (-1)" 4 "Felt depressed x (-1)" ///
754
+ 5 "Could concentrate" 6 "Was easily distracted x (-1)" ///
755
+ 7 "Slept well" ///
756
+ 9 "Happy, satisfied, anxious, depressed index" ///
757
+ 10 "Concentrate, distracted, sleep index", ///
758
+ valuelabel angle(0))
759
+
760
+
761
+ graph export "output/coef_swb_icw_simple.pdf", replace
762
+
763
+ restore
764
+ end
765
+
766
+ program plot_snooze
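+ * Plots average snooze use by period; the minutes option switches from
+ * counts/day to winsorized minutes/day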
767
+ syntax, [fitsby] [minutes]
768
+
769
+ * Determine FITSBY restriction
770
+ if ("`fitsby'" == "fitsby") {
771
+ local fitsby "FITSBY"
772
+ local suffix "_fitsby"
773
+ }
774
+ else {
775
+ local fitsby ""
776
+ local suffix ""
777
+ }
778
+
779
+ * Determine snooze measure
780
+ if ("`minutes'" == "minutes") {
781
+ local measure "Min_W"
782
+ local root "min"
783
+ local ytitle "(minutes/day)"
784
+ }
785
+ else {
786
+ local measure "Count"
787
+ local root "count"
788
+ local ytitle "(count/day)"
789
+ }
790
+
791
+ * Preserve data
792
+ preserve
793
+
794
+ * Reshape data
795
+ keep UserID PD_*Snooze`measure'`fitsby'
796
+ rename_but, varlist(UserID) prefix(snooze)
797
+ reshape long snooze, i(UserID) j(measure) string
798
+
799
+ * Recode data
800
+ encode measure, generate(measure_encode)
801
+
802
+ recode measure_encode ///
803
+ (1 = 1 "Period 2") ///
804
+ (2 = 2 "Period 3") ///
805
+ (5 = 3 "Period 4") ///
806
+ (7 = 4 "Period 5") ///
807
+ (4 = 5 "Periods 3 & 4") ///
808
+ (3 = 6 "Periods 2 to 4") ///
809
+ (6 = 7 "Periods 2 to 5"), ///
810
+ gen(measure_recode)
811
+
812
+ * Plot data
813
+ gen dummy = 1
814
+
815
+ cispike snooze if measure_recode <= 4, ///
816
+ over1(dummy) over2(measure_recode) ///
817
+ $CISPIKE_SETTINGS ///
818
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
819
+ ytitle("Snooze use `ytitle'" " ") ///
820
+ legend(off))
821
+
822
+ graph export "output/cispike_snooze_`root'`suffix'.pdf", replace
823
+
824
+ * Restore data
825
+ restore
826
+ end
827
+
828
+ program plot_snooze_both
829
+ syntax, [fitsby]
830
+
831
+ * Determine FITSBY restriction
832
+ if ("`fitsby'" == "fitsby") {
833
+ local fitsby "FITSBY"
834
+ local suffix "_fitsby"
835
+ local ylabel2 0(8)40
836
+ local ylabel1 0(.1).5
837
+ }
838
+ else {
839
+ local fitsby ""
840
+ local suffix ""
841
+ local ylabel2 0(8)40
842
+ local ylabel1 0(.1).5
843
+ }
844
+
845
+ * Preserve data
846
+ preserve
847
+
848
+ * Reshape data
849
+ keep UserID PD_*SnoozeCount`fitsby' *SnoozeMin_W`fitsby'
850
+ rename PD_*Snooze*`fitsby' **
851
+ rename_but, varlist(UserID) prefix(snooze)
852
+ reshape long snooze, i(UserID) j(measure) string
853
+
854
+ split measure, p("_")
855
+ drop measure
856
+ rename (measure1 measure2) (time measure)
857
+
858
+ * Recode data
859
+ encode time, generate(time_encode)
860
+ encode measure, generate(measure_encode)
861
+
862
+ recode time_encode ///
863
+ (1 = 1 "Period 2") ///
864
+ (2 = 2 "Period 3") ///
865
+ (3 = 3 "Period 4") ///
866
+ (6 = 4 "Period 5") ///
867
+ (4 = 5 "Periods 3 & 4") ///
868
+ (5 = 6 "Periods 2 to 4") ///
869
+ (7 = 7 "Periods 2 to 5"), ///
870
+ gen(time_recode)
871
+
872
+ recode measure_encode ///
873
+ (1 = 1 "Snoozes per day") ///
874
+ (2 = 2 "Snooze minutes per day"), ///
875
+ gen(measure_recode)
876
+
877
+ * Plot data
878
+
879
+ // Manually set labels and legends for double axis figures
880
+ cispike snooze if time_recode <= 3, ///
881
+ over1(measure_recode) over2(time_recode) ///
882
+ $CISPIKE_DOUBLE_SETTINGS ///
883
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
884
+ ylabel(`ylabel1', axis(1)) ///
885
+ ylabel(`ylabel2', axis(2)) ///
886
+ ytitle("Snoozes per day" " ", axis(1)) ///
887
+ ytitle(" " "Snooze minutes per day", axis(2)) ///
888
+ legend(order(4 "Snoozes per day" 10 "Snooze minutes per day")))
889
+
890
+ graph export "output/cispike_snooze_both`suffix'.pdf", replace
891
+
892
+ * Restore data
893
+ restore
894
+ end
895
+
896
+ program plot_snooze_by_limit
897
+ syntax, [fitsby] [minutes]
898
+
899
+ * Determine FITSBY restriction
900
+ if ("`fitsby'" == "fitsby") {
901
+ local fitsby "FITSBY"
902
+ local suffix "_fitsby"
903
+ }
904
+ else {
905
+ local fitsby ""
906
+ local suffix ""
907
+ }
908
+
909
+ * Determine snooze measure
910
+ if ("`minutes'" == "minutes") {
911
+ local measure "Min_W"
912
+ local root "min"
913
+ local ytitle "(minutes/day)"
914
+ }
915
+ else {
916
+ local measure "Count"
917
+ local root "count"
918
+ local ytitle "(count/day)"
919
+ }
920
+
921
+ * Preserve data
922
+ preserve
923
+
924
+ * Reshape data
925
+ keep UserID S2_LimitType PD_*Snooze`measure'`fitsby'
926
+ rename_but, varlist(UserID S2_LimitType) prefix(snooze)
927
+ reshape long snooze, i(UserID S2_LimitType) j(measure) string
928
+
929
+ * Recode data
930
+ encode measure, generate(measure_encode)
931
+
932
+ recode measure_encode ///
933
+ (1 = 1 "Period 2") ///
934
+ (2 = 2 "Period 3") ///
935
+ (5 = 3 "Period 4") ///
936
+ (7 = 4 "Period 5") ///
937
+ (4 = 5 "Periods 3 & 4") ///
938
+ (3 = 6 "Periods 2 to 4") ///
939
+ (6 = 7 "Periods 2 to 5"), ///
940
+ gen(measure_recode)
941
+
942
+ recode S2_LimitType ///
943
+ (0 = .) ///
944
+ (1 = 1 "Snooze 0") ///
945
+ (2 = 2 "Snooze 2") ///
946
+ (3 = 3 "Snooze 5") ///
947
+ (4 = 4 "Snooze 20") ///
948
+ (5 = .), ///
949
+ gen(S2_LimitType_recode)
950
+
951
+ * Plot data (by period)
952
+ cispike snooze if measure_recode <= 3, ///
953
+ over1(measure_recode) over2(S2_LimitType_recode) ///
954
+ $CISPIKE_SETTINGS ///
955
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
956
+ ytitle("Snooze use `ytitle'" " "))
957
+
958
+ graph export "output/cispike_snooze_`root'_by_limit`suffix'.pdf", replace
959
+
960
+ * Plot data (all periods)
961
+ cispike snooze if measure_recode == 5, ///
962
+ over1(measure_recode) over2(S2_LimitType_recode) ///
963
+ $CISPIKE_SETTINGS ///
964
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
965
+ ytitle("Snooze use `ytitle'" " ") ///
966
+ legend(off))
967
+
968
+ graph export "output/cispike_snooze_`root'_combined_by_limit`suffix'.pdf", replace
969
+
970
+
971
+ * Restore data
972
+ restore
973
+ end
974
+
975
+ program plot_snooze_both_by_limit
976
+ syntax, [fitsby]
977
+
978
+ * Determine FITSBY restriction
979
+ if ("`fitsby'" == "fitsby") {
980
+ local fitsby "FITSBY"
981
+ local suffix "_fitsby"
982
+ local ylabel2 0(12)60
983
+ local ylabel1 0(.3)1.5
984
+ }
985
+ else {
986
+ local fitsby ""
987
+ local suffix ""
988
+ local ylabel2 0(12)60
989
+ local ylabel1 0(.3)1.5
990
+ }
991
+
992
+ * Preserve data
993
+ preserve
994
+
995
+ * Reshape data
996
+ keep UserID S2_LimitType PD_*SnoozeCount`fitsby' *SnoozeMin_W`fitsby'
997
+ rename PD_*Snooze*`fitsby' **
998
+ rename_but, varlist(UserID S2_LimitType) prefix(snooze)
999
+ reshape long snooze, i(UserID S2_LimitType) j(measure) string
1000
+
1001
+ split measure, p("_")
1002
+ drop measure
1003
+ rename (measure1 measure2) (time measure)
1004
+
1005
+ * Recode data
1006
+ encode time, generate(time_encode)
1007
+ encode measure, generate(measure_encode)
1008
+
1009
+ recode S2_LimitType ///
1010
+ (0 = .) ///
1011
+ (1 = 1 "Snooze 0") ///
1012
+ (2 = 2 "Snooze 2") ///
1013
+ (3 = 3 "Snooze 5") ///
1014
+ (4 = 4 "Snooze 20") ///
1015
+ (5 = .), ///
1016
+ gen(S2_LimitType_recode)
1017
+
1018
+ recode time_encode ///
1019
+ (1 = 1 "Period 2") ///
1020
+ (2 = 2 "Period 3") ///
1021
+ (3 = 3 "Period 4") ///
1022
+ (6 = 4 "Period 5") ///
1023
+ (4 = 5 "Periods 3 & 4") ///
1024
+ (5 = 6 "Periods 2 to 4") ///
1025
+ (7 = 7 "Periods 2 to 5"), ///
1026
+ gen(time_recode)
1027
+
1028
+ recode measure_encode ///
1029
+ (1 = 1 "Snoozes per day") ///
1030
+ (2 = 2 "Snooze minutes per day"), ///
1031
+ gen(measure_recode)
1032
+
1033
+ * Plot data
1034
+
1035
+ // Manually set labels and legends for double axis figures
1036
+
1037
+ * Plot data (all periods)
1038
+ cispike snooze if time_recode == 6, ///
1039
+ over1(measure_recode) over2(S2_LimitType_recode) ///
1040
+ $CISPIKE_DOUBLE_SETTINGS ///
1041
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
1042
+ ylabel(`ylabel1', axis(1)) ///
1043
+ ylabel(`ylabel2', axis(2)) ///
1044
+ ytitle("Snoozes per day" " ", axis(1)) ///
1045
+ ytitle(" " "Snooze minutes per day", axis(2)) ///
1046
+ legend(order(5 "Snoozes per day" 13 "Snooze minutes per day")))
1047
+
1048
+ graph export "output/cispike_snooze_both_combined_by_limit`suffix'.pdf", replace
1049
+
1050
+ * Restore data
1051
+ restore
1052
+ end
1053
+
1054
+ program plot_phone_use_change
1055
+ * Preserve data
1056
+ preserve
1057
+
1058
+ * Reshape data
1059
+ keep UserID S2_LimitType *PhoneUseChange
1060
+ rename_but, varlist(UserID S2_LimitType) prefix(phone_use)
1061
+ reshape long phone_use, i(UserID S2_LimitType) j(measure) string
1062
+
1063
+ * Recode data
1064
+ encode measure, generate(measure_encode)
1065
+
1066
+ recode measure_encode ///
1067
+ (1 = 1 "Survey 1") ///
1068
+ (2 = 2 "Survey 3") ///
1069
+ (3 = 3 "Survey 4"), ///
1070
+ gen(measure_recode)
1071
+
1072
+ recode S2_LimitType ///
1073
+ (0 = 0 "Control") ///
1074
+ (1 = 1 "Snooze 0") ///
1075
+ (2 = 2 "Snooze 2") ///
1076
+ (3 = 3 "Snooze 5") ///
1077
+ (4 = 4 "Snooze 20") ///
1078
+ (5 = 5 "No snooze"), ///
1079
+ gen(S2_LimitType_recode)
1080
+
1081
+ * Plot data
1082
+ cispike phone_use, ///
1083
+ over1(measure_recode) over2(S2_LimitType_recode) ///
1084
+ $CISPIKE_SETTINGS ///
1085
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
1086
+ ytitle("Phone use change (percent)" " ") ///
1087
+ yline(0, lwidth(thin) lcolor(black)))
1088
+
1089
+ graph export "output/cispike_phone_use.pdf", replace
1090
+
1091
+ * Restore data
1092
+ restore
1093
+ end
1094
+
1095
+ program plot_phone_use_change_simple
1096
+ * Preserve data
1097
+ preserve
1098
+
1099
+ * Reshape data
1100
+ keep UserID S2_LimitType S3_Bonus *PhoneUseChange
1101
+ rename_but, varlist(UserID S2_LimitType S3_Bonus) prefix(phone_use)
1102
+ reshape long phone_use, i(UserID S2_LimitType S3_Bonus) j(measure) string
1103
+
1104
+ * Recode data
1105
+ encode measure, generate(measure_encode)
1106
+
1107
+ recode measure_encode ///
1108
+ (1 = 1 "Survey 1") ///
1109
+ (2 = 2 "Survey 3") ///
1110
+ (3 = 3 "Survey 4"), ///
1111
+ gen(measure_recode)
1112
+
1113
+ gen treatment = .
1114
+ replace treatment = 0 if S2_LimitType == 0 & S3_Bonus == 0
1115
+ replace treatment = 1 if S2_LimitType == 0 & S3_Bonus == 1
1116
+ replace treatment = 2 if S2_LimitType != 0 & S3_Bonus == 0
1117
+ replace treatment = 3 if S2_LimitType != 0 & S3_Bonus == 1
1118
+
1119
+ recode treatment ///
1120
+ (0 = 0 "Control") ///
1121
+ (1 = 1 "Bonus only") ///
1122
+ (2 = 2 "Limit only") ///
1123
+ (3 = 3 "Bonus and limit"), ///
1124
+ gen(treatment_recode)
1125
+
1126
+ * Plot data
1127
+ cispike phone_use, ///
1128
+ over1(measure_recode) over2(treatment_recode) ///
1129
+ $CISPIKE_SETTINGS ///
1130
+ graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
1131
+ ytitle("Phone use change (percent)" " ") ///
1132
+ yline(0, lwidth(thin) lcolor(black)))
1133
+
1134
+ graph export "output/cispike_phone_use_simple.pdf", replace
1135
+
1136
+ * Restore data
1137
+ restore
1138
+ end
1139
+
1140
+ program reg_usage_interaction
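+ * Usage effects allowing a bonus x limit interaction (coefficients B_1, L_1, B_L_1_1)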
1141
+ syntax, [fitsby]
1142
+
1143
+ est clear
1144
+
1145
+ * Determine FITSBY restriction
1146
+ if ("`fitsby'" == "fitsby") {
1147
+ local fitsby "FITSBY"
1148
+ local suffix "_fitsby"
1149
+ }
1150
+ else {
1151
+ local fitsby ""
1152
+ local suffix ""
1153
+ }
1154
+
1155
+ * Run regressions
1156
+ foreach yvar in PD_P2_Usage`fitsby' ///
1157
+ PD_P3_Usage`fitsby' ///
1158
+ PD_P4_Usage`fitsby' ///
1159
+ PD_P5_Usage`fitsby' {
1160
+ local baseline PD_P1_Usage`fitsby'
1161
+
1162
+ gen_interaction
1163
+ reg_interaction, yvar(`yvar') indep($STRATA `baseline')
1164
+ est store `yvar'
1165
+ }
1166
+
1167
+ * Plot regressions
1168
+ coefplot (PD_P2_Usage`fitsby', label("Period 2") $COLOR_MAROON msymbol(O)) ///
1169
+ (PD_P3_Usage`fitsby', label("Period 3") $COLOR_BLACK msymbol(S)) ///
1170
+ (PD_P4_Usage`fitsby', label("Period 4") $COLOR_NAVY msymbol(D)) ///
1171
+ (PD_P5_Usage`fitsby', label("Period 5") $COLOR_GRAY msymbol(T)), ///
1172
+ keep(B_* L_*) order(B_1 L_1 B_L_1_1) ///
1173
+ $COEFPLOT_SETTINGS_MINUTES
1174
+
1175
+ graph export "output/coef_usage_interaction`suffix'.pdf", replace
1176
+ end
1177
+
1178
+ program reshape_self_control_outcomes
1179
+ * Reshape wide to long
1180
+ gen S4_Usage_FITSBY = PD_P3_UsageFITSBY
1181
+ gen S3_Usage_FITSBY = PD_P2_UsageFITSBY
1182
+
1183
+ keep UserID S3_Bonus S2_LimitType Stratifier ///
1184
+ S*_Usage_FITSBY ///
1185
+ S*_PhoneUseChange_N ///
1186
+ S*_AddictionIndex_N ///
1187
+ S*_SMSIndex_N ///
1188
+ S*_SWBIndex_N ///
1189
+ S*_LifeBetter_N ///
1190
+ S*_index_well_N
1191
+
1192
+ local indep UserID S3_Bonus S2_LimitType Stratifier S1_*
1193
+ rename_but, varlist(`indep') prefix(outcome)
1194
+ reshape long outcome, i(`indep') j(measure) string
1195
+
1196
+ split measure, p(_)
1197
+ replace measure = measure2 + "_" + measure3 + "_" + measure4 if measure4 != ""
1198
+ replace measure = measure2 + "_" + measure3 if measure4 == ""
1199
+ rename measure1 survey
1200
+ drop measure2 measure3 measure4
1201
+
1202
+ * Reshape long to wide
1203
+ reshape wide outcome, i(UserID survey) j(measure) string
1204
+ rename outcome* *
1205
+
1206
+ * Recode data
1207
+ encode survey, gen(S)
1208
+
1209
+ * Label data
1210
+ label var PhoneUseChange "Ideal use change"
1211
+ label var AddictionIndex "Addiction scale x (-1)"
1212
+ label var SMSIndex "SMS addiction scale x (-1)"
1213
+ label var LifeBetter "Phone makes life better"
1214
+ label var SWBIndex "Subjective well-being"
1215
+ label var index_well "Survey index"
1216
+ end
1217
+
1218
+ program gen_coefficient
1219
+ syntax, var(str) suffix(str) label_var(str)
1220
+
1221
+ cap drop C`suffix'
1222
+ gen C`suffix' = `var'
1223
+
1224
+ local vlabel: variable label `label_var'
1225
+ label var C`suffix' "`vlabel'"
1226
+ end
1227
+
1228
+ program reg_self_control
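+ * Stacks Surveys 3 and 4 to estimate bonus and limit effects on self-control
+ * and well-being outcomes, clustering standard errors by user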
1229
+ est clear
1230
+
1231
+ * Preserve data
1232
+ preserve
1233
+
1234
+ * Reshape data
1235
+ reshape_self_control_outcomes
1236
+
1237
+ * Specify regression
1238
+ local yvarset ///
1239
+ PhoneUseChange_N ///
1240
+ AddictionIndex_N ///
1241
+ SMSIndex_N ///
1242
+ LifeBetter_N ///
1243
+ SWBIndex_N ///
1244
+ index_well_N
1245
+
1246
+ * Run regressions
1247
+ foreach yvar in `yvarset' {
1248
+ local baseline = "S1_`yvar'"
1249
+
1250
+ * Treatment indicators
1251
+ gen_treatment, suffix(_`yvar') simple
1252
+ cap drop B3_`yvar'
1253
+ cap drop B4_`yvar'
1254
+ gen B3_`yvar' = B_`yvar' * (S == 1)
1255
+ gen B4_`yvar' = B_`yvar' * (S == 2)
1256
+
1257
+ * Specify regression
1258
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
1259
+
1260
+ * Limit
1261
+ gen_coefficient, var(L_`yvar') suffix(_`yvar') label_var(`yvar')
1262
+ reg `yvar' C_`yvar' B3_`yvar' B4_`yvar' `indep', robust cluster(UserID)
1263
+ est store L_`yvar'
1264
+
1265
+ * Bonus
1266
+ gen_coefficient, var(B4_`yvar') suffix(_`yvar') label_var(`yvar')
1267
+ reg `yvar' L_`yvar' B3_`yvar' C_`yvar' `indep', robust cluster(UserID)
1268
+ est store B_`yvar'
1269
+ }
1270
+
1271
+ * Plot regressions
1272
+ coefplot (B_*, label("Bonus") $COLOR_MAROON) ///
1273
+ (L_*, label("Limit") $COLOR_GRAY), ///
1274
+ keep(C_*) ///
1275
+ $COEFPLOT_SETTINGS_ITT
1276
+
1277
+ graph export "output/coef_self_control.pdf", replace
1278
+
1279
+ * Restore data
1280
+ restore
1281
+ end
1282
+
1283
+ program reg_self_control_null
1284
+ est clear
1285
+
1286
+ * Preserve data
1287
+ preserve
1288
+
1289
+ * Reshape data
1290
+ reshape_self_control_outcomes
1291
+
1292
+ * Specify regression
1293
+ local yvarset ///
1294
+ PhoneUseChange_N ///
1295
+ AddictionIndex_N ///
1296
+ SMSIndex_N ///
1297
+ LifeBetter_N ///
1298
+ SWBIndex_N ///
1299
+ index_well_N
1300
+
1301
+ * Run regressions
1302
+ foreach yvar in `yvarset' {
1303
+ local baseline = "S1_`yvar'"
1304
+
1305
+ * Treatment indicators
1306
+ gen_treatment, suffix(_`yvar') simple
1307
+ cap drop B3_`yvar'
1308
+ cap drop B4_`yvar'
1309
+ cap drop L3_`yvar'
1310
+ cap drop L4_`yvar'
1311
+ gen B3_`yvar' = B_`yvar' * (S == 1)
1312
+ gen B4_`yvar' = B_`yvar' * (S == 2)
1313
+ gen L3_`yvar' = L_`yvar' * (S == 1)
1314
+ gen L4_`yvar' = L_`yvar' * (S == 2)
1315
+
1316
+ * Specify regression
1317
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
1318
+
1319
+ * Limit
1320
+ gen_coefficient, var(L3_`yvar') suffix(_`yvar') label_var(`yvar')
1321
+ reg `yvar' C_`yvar' B3_`yvar' B4_`yvar' L4_`yvar' `indep', robust cluster(UserID)
1322
+ est store L3_`yvar'
1323
+
1324
+ gen_coefficient, var(L4_`yvar') suffix(_`yvar') label_var(`yvar')
1325
+ reg `yvar' C_`yvar' B3_`yvar' B4_`yvar' L3_`yvar' `indep', robust cluster(UserID)
1326
+ est store L4_`yvar'
1327
+
1328
+ * Bonus
1329
+ gen_coefficient, var(B3_`yvar') suffix(_`yvar') label_var(`yvar')
1330
+ reg `yvar' C_`yvar' L_`yvar' B4_`yvar' `indep', robust cluster(UserID)
1331
+ est store B3_`yvar'
1332
+
1333
+
1334
+ gen_coefficient, var(B4_`yvar') suffix(_`yvar') label_var(`yvar')
1335
+ reg `yvar' C_`yvar' L_`yvar' B3_`yvar' `indep', robust cluster(UserID)
1336
+ est store B4_`yvar'
1337
+ }
1338
+
1339
+ * Plot regressions
1340
+ coefplot (B3_*, label("Bonus: Survey 3") $COLOR_MAROON_LIGHT msymbol(o)) ///
1341
+ (B4_*, label("Bonus: Survey 4") $COLOR_MAROON_DARK msymbol(s)) ///
1342
+ (L3_*, label("Limit: Survey 3") $COLOR_GRAY_LIGHT msymbol(o)) ///
1343
+ (L4_*, label("Limit: Survey 4") $COLOR_GRAY_DARK msymbol(s)), ///
1344
+ keep(C_*) ///
1345
+ $COEFPLOT_SETTINGS_ITT ///
1346
+ $ADDICTION_LABELS
1347
+
1348
+ graph export "output/coef_self_control_null.pdf", replace
1349
+
1350
+ * Restore data
1351
+ restore
1352
+ end
1353
+
1354
+ program reg_iv_self_control
1355
+ est clear
1356
+
1357
+ * Preserve data
1358
+ preserve
1359
+
1360
+ * Reshape data
1361
+ reshape_self_control_outcomes
1362
+
1363
+ * Specify regression
1364
+ local yvarset ///
1365
+ PhoneUseChange_N ///
1366
+ AddictionIndex_N ///
1367
+ SMSIndex_N ///
1368
+ LifeBetter_N ///
1369
+ SWBIndex_N ///
1370
+ index_well_N
1371
+
1372
+ * Run regressions
1373
+ foreach yvar in `yvarset' {
1374
+ local baseline = "S1_`yvar'"
1375
+
1376
+ * Treatment indicators
1377
+ gen_treatment, suffix(_`yvar') simple
1378
+
1379
+ * Specify regression
1380
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
1381
+
1382
+ * Run regression
1383
+ gen_usage_stacked, yvar(`yvar') suffix(_`yvar') var(`yvar')
1384
+ reg_usage_stacked, yvar(`yvar') suffix(_`yvar') indep(`indep')
1385
+ est store U_`yvar'
1386
+ }
1387
+
1388
+ * Plot regressions
1389
+ coefplot (U_*, $COLOR_NAVY), ///
1390
+ keep(U_*) ///
1391
+ $COEFPLOT_SETTINGS_STD ///
1392
+ legend(off)
1393
+
1394
+ graph export "output/coef_iv_self_control.pdf", replace
1395
+
1396
+ * Restore data
1397
+ restore
1398
+ end
1399
+
1400
+ ***********
1401
+ * Execute *
1402
+ ***********
1403
+
1404
+ main
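
Note: `reg_iv_self_control` leans on two helpers defined earlier in this file, `gen_usage_stacked` and `reg_usage_stacked`, whose bodies fall outside this part of the diff. As a reading aid, here is a minimal sketch of the stacked-IV idea in Stata: a survey outcome regressed on phone usage, with usage instrumented by random assignment to the treatments. The names `y`, `U`, `B`, and `L` are placeholders, not the package's exact variables.

* Minimal sketch, not the package's helper: instrument stacked usage (U)
* with the Bonus (B) and Limit (L) assignment indicators, with survey
* fixed effects, clustering by participant.
ivregress 2sls y (U = B L) i.S, vce(cluster UserID)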
17/replication_package/code/analysis/treatment_effects/code/FDRTable.do ADDED
@@ -0,0 +1,252 @@
+ ***************
+ * Environment *
+ ***************
+ 
+ clear all
+ adopath + "input/lib/ado"
+ adopath + "input/lib/stata/ado"
+ 
+ program main
+     define_constants
+     import_data
+     run_regs
+ 
+     create_pval_tables
+     create_pval_tables, limit
+ end
+ 
+ program define_constants
+     yaml read YAML using "input/config.yaml"
+     yaml global STRATA = YAML.metadata.strata
+ end
+ 
+ program import_data
+     use "input/final_data_sample.dta", clear
+     gen_treatment, simple
+ end
+ 
+ program latex
+     syntax, name(str) value(str)
+ 
+     local command = "\newcommand{\\`name'}{`value'}"
+ 
+     file open scalars using "output/scalars.tex", write append
+     file write scalars `"`command'"' _n
+     file close scalars
+ end
+ 
+ program latex_precision
+     syntax, name(str) value(str) digits(str)
+ 
+     autofmt, input(`value') dec(`digits') strict
+     local value = r(output1)
+ 
+     latex, name(`name') value(`value')
+ end
+ 
+ program reshape_self_control_outcomes
+     * Reshape wide to long
+     gen S4_Usage_FITSBY = PD_P3_UsageFITSBY
+     gen S3_Usage_FITSBY = PD_P2_UsageFITSBY
+ 
+     keep UserID S3_Bonus S2_LimitType Stratifier ///
+         S*_Usage_FITSBY ///
+         S*_PhoneUseChange_N ///
+         S*_AddictionIndex_N ///
+         S*_SMSIndex_N ///
+         S*_SWBIndex_N ///
+         S*_LifeBetter_N ///
+         S*_index_well_N
+ 
+     local indep UserID S3_Bonus S2_LimitType Stratifier S1_*
+     rename_but, varlist(`indep') prefix(outcome)
+     reshape long outcome, i(`indep') j(measure) string
+ 
+     split measure, p(_)
+     replace measure = measure2 + "_" + measure3 + "_" + measure4 if measure4 != ""
+     replace measure = measure2 + "_" + measure3 if measure4 == ""
+     rename measure1 survey
+     drop measure2 measure3 measure4
+ 
+     * Reshape long to wide
+     reshape wide outcome, i(UserID survey) j(measure) string
+     rename outcome* *
+ 
+     * Recode data
+     encode survey, gen(S)
+ 
+     * Label data
+     label var PhoneUseChange "Ideal use change"
+     label var AddictionIndex "Addiction scale x (-1)"
+     label var SMSIndex "SMS addiction scale x (-1)"
+     label var LifeBetter "Phone makes life better"
+     label var SWBIndex "Subjective well-being"
+     label var index_well "Survey index"
+ end
+ 
+ program make_treatment_indicators
+     * Hacky way to keep LifeBetter_N from being dropped by gen_treatment
+     gen alt_LifeBetter_N = LifeBetter_N
+     * Treatment indicators
+     gen_treatment, simple
+     cap drop LifeBetter_N
+     gen LifeBetter_N = alt_LifeBetter_N
+     label var LifeBetter_N "Phone makes life better"
+ end
+ 
+ program run_regs
+     * Reshape data
+     reshape_self_control_outcomes
+ 
+     local swb_vars ///
+         PhoneUseChange_N ///
+         AddictionIndex_N ///
+         SMSIndex_N ///
+         LifeBetter_N ///
+         SWBIndex_N ///
+         index_well_N
+ 
+     make_treatment_indicators
+ 
+     cap drop B3
+     cap drop B4
+     gen B3 = B * (S == 1)
+     gen B4 = B * (S == 2)
+     * B now holds the survey-4 bonus indicator; B4 is collinear with it
+     * and is omitted in the regressions below
+     replace B = B4
+ 
+     * Run regressions
+     foreach yvar in `swb_vars' {
+         local baseline = "S1_`yvar'"
+ 
+         * Specify regression
+         local indep i.S i.S#$STRATA i.S#c.`baseline'
+ 
+         * Bonus (survey 4) and Limit effects
+         reg `yvar' B B4 L `indep', robust cluster(UserID)
+         est store `yvar'
+     }
+ end
+ 
+ program create_pval_tables
+     syntax, [limit]
+ 
+     * Coefficient of interest: the pooled limit effect (L) for the limit
+     * table, the survey-4 bonus effect (B) for the bonus table
+     if ("`limit'" == "limit") {
+         local T L
+         local Survey "S34"
+         local file_suffix "limit"
+     }
+     else {
+         local T B
+         local Survey "S3"
+         local file_suffix "bonus"
+     }
+ 
+     local swb_vars ///
+         PhoneUseChange_N ///
+         AddictionIndex_N ///
+         SMSIndex_N ///
+         LifeBetter_N ///
+         SWBIndex_N ///
+         index_well_N
+ 
+     local mat_length = 0
+     foreach var in `swb_vars' {
+         local mat_length = `mat_length' + 1
+     }
+ 
+     * The tabstat statistic names below are repurposed as storage slots:
+     * mean = treatment effect, Var = standard error, min/max = effect and SE
+     * in SD units (the outcomes are already normalized), sum = p-value,
+     * range = sharpened q-value
+     foreach matname in sd count mean Var min max sum range {
+         mat `matname'_swb = J(1,`mat_length',.)
+         mat rownames `matname'_swb = `matname'
+         mat colnames `matname'_swb = `swb_vars'
+     }
+ 
+     mat pvalues = J(1,`mat_length',.)
+ 
+     ** Make descriptive stats and estimate tables
+     local pvalue_counter = 1
+     foreach varset in swb_vars {
+         local suffix swb
+         local mat_counter = 1
+ 
+         foreach yvar in ``varset'' {
+             est restore `yvar'
+             mat count_`suffix'[1, `mat_counter'] = e(N)
+             mat mean_`suffix'[1, `mat_counter'] = _b[`T']
+             mat Var_`suffix'[1, `mat_counter'] = _se[`T']
+             local pvalue = 2 * ttail(e(N) - e(df_m), abs(_b[`T']/_se[`T']))
+ 
+             mat min_`suffix'[1, `mat_counter'] = _b[`T']
+             mat max_`suffix'[1, `mat_counter'] = _se[`T']
+             mat sum_`suffix'[1, `mat_counter'] = `pvalue'
+             mat pvalues[1, `pvalue_counter'] = `pvalue'
+             local mat_counter = `mat_counter' + 1
+             local pvalue_counter = `pvalue_counter' + 1
+         }
+     }
+ 
+     clear
+ 
+     mat pvalues = pvalues'
+     svmat float pvalues, name(pval)
+ 
+     do "../../lib/stata/SharpenPValues.do"
+ 
+     * Note that the SWB index is the fifth variable
+     * Save its FDR-sharpened q-value as a scalar
+     local fdr_val = bky06_qval[5]
+     *latex_precision, name(`file_suffix'SWBfdr) value(`fdr_val') digits(2)
+ 
+     mkmat bky06_qval, matrix(sharpened_vals)
+     mat sharpened_vals = sharpened_vals'
+ 
+     import_data
+     reshape_self_control_outcomes
+     make_treatment_indicators
+ 
+     local pvalue_counter = 1
+     foreach varset in swb_vars {
+         local suffix swb
+ 
+         local mat_counter = 1
+         foreach yvar in ``varset'' {
+             mat range_`suffix'[1, `mat_counter'] = sharpened_vals[1, `pvalue_counter']
+             local mat_counter = `mat_counter' + 1
+             local pvalue_counter = `pvalue_counter' + 1
+         }
+ 
+         estpost tabstat ``varset'' if `T'==0, statistics(mean, sd, max, min, count) columns(statistics)
+         foreach value in count {
+             estadd mat `value' = `value'_`suffix', replace
+         }
+         est store `varset'
+ 
+         estpost tabstat ``varset'', statistics(mean, Var, max, min, sum, range) columns(statistics)
+         foreach value in mean Var max min sum range {
+             estadd mat `value' = `value'_`suffix', replace
+         }
+         est store `varset'_reg
+ 
+         esttab `varset' using "output/`varset'_descriptive_stats_`file_suffix'.tex", ///
+             label cells((mean(fmt(%8.2fc)) sd(fmt(%8.2fc)) min(fmt(%8.0fc)) max(fmt(%8.0fc)) count(fmt(%8.0fc)))) ///
+             collabels("\shortstack{Mean}" "\shortstack{Standard\\deviation}" "\shortstack{Minimum\\value}" "\shortstack{Maximum\\value}" "\shortstack{N in\\regression}") ///
+             noobs replace nomtitle nonumbers compress
+ 
+         esttab `varset'_reg using "output/`varset'_estimates_`file_suffix'.tex", ///
+             label cells((mean(fmt(%8.2fc)) Var(fmt(%8.2fc)) min(fmt(%8.2fc)) max(fmt(%8.2fc)) sum(fmt(%8.2fc)) range(fmt(%8.2fc)))) ///
+             collabels("\shortstack{(1)\\Treatment\\effect\\(original\\units)}" "\shortstack{(2)\\Standard\\error\\(original\\units)}" "\shortstack{(3)\\Treatment\\effect\\(SD units)}" ///
+                 "\shortstack{(4)\\Standard\\error\\(SD units)}" "\shortstack{(5)\\P-value}" "\shortstack{(6)\\Sharpened\\FDR-\\adjusted\\q-value}") ///
+             noobs replace nomtitle nonumbers compress
+     }
+ end
+ 
+ ***********
+ * Execute *
+ ***********
+ 
+ main
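
Note: `create_pval_tables` delegates the multiple-testing correction to `../../lib/stata/SharpenPValues.do`, which is not shown in this diff and is expected to leave a `bky06_qval` variable behind (the p-values themselves land in `pval1`, per `svmat`'s naming convention above). For orientation, a sketch of the Benjamini-Krieger-Yekutieli (2006) two-stage sharpened q-value loop that such scripts conventionally implement, in the spirit of Anderson (2008); everything beyond `pval1` and `bky06_qval` is illustrative:

* Sketch only: two-stage BKY (2006) sharpened q-values over pval1.
quietly count if pval1 != .
local M = r(N)                          // number of tests
gen bky06_qval = 1
sort pval1
gen rank = _n if pval1 != .
local qval = 1
while `qval' > 0 {
    * Stage 1: Benjamini-Hochberg at level qval / (1 + qval)
    local qval_adj = `qval' / (1 + `qval')
    gen reject1 = (pval1 <= rank / `M' * `qval_adj') if pval1 != .
    quietly sum reject1
    local r1 = r(sum)
    * Stage 2: BH again at a level inflated by the stage-1 rejection count
    local qval_2st = `qval_adj' * `M' / (`M' - `r1')
    gen reject2 = (pval1 <= rank / `M' * `qval_2st') if pval1 != .
    quietly replace bky06_qval = `qval' if reject2 == 1 & `qval' < bky06_qval
    drop reject1 reject2
    local qval = round(`qval' - 0.001, .001)
}

The loop walks the target q down from 1 to 0.001 and records, for each test, the smallest q at which it is still rejected, which is exactly what the table's "sharpened FDR-adjusted q-value" column reports.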
17/replication_package/code/analysis/treatment_effects/code/HabitFormation.do ADDED
@@ -0,0 +1,121 @@
+ // Habit formation and naivete
+ 
+ ***************
+ * Environment *
+ ***************
+ 
+ clear all
+ adopath + "input/lib/ado"
+ adopath + "input/lib/stata/ado"
+ 
+ *********************
+ * Utility functions *
+ *********************
+ 
+ program define_constants
+     yaml read YAML using "input/config.yaml"
+     yaml global STRATA = YAML.metadata.strata
+ end
+ 
+ program define_plot_settings
+     global COLOR_MAROON ///
+         mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
+ 
+     global COLOR_BLACK ///
+         mcolor(black) ciopts(recast(rcap) lcolor(black))
+ 
+     global COLOR_GRAY ///
+         mcolor(gray) ciopts(recast(rcap) lcolor(gray))
+ 
+     global COLOR_NAVY ///
+         mcolor(navy) ciopts(recast(rcap) lcolor(navy))
+ 
+     global COEFPLOT_SETTINGS_MINUTES ///
+         vertical ///
+         yline(0, lwidth(thin) lcolor(black)) ///
+         bgcolor(white) graphregion(color(white)) ///
+         legend(cols(3) region(lcolor(white))) ///
+         xsize(6.5) ysize(4.5) ///
+         ytitle("Treatment effect (minutes/day)" " ") ///
+         coeflabels(B_P3 = `"Period 3"' ///
+             B_P4 = `"Period 4"' ///
+             B_P5 = `"Period 5"')
+ 
+     global COEFPLOT_SETTINGS_MINUTES_DOUBLE ///
+         vertical ///
+         yline(0, lwidth(thin) lcolor(black)) ///
+         bgcolor(white) graphregion(color(white)) ///
+         xsize(6.5) ysize(4.5) ///
+         ytitle("Treatment effect (minutes/day)" " ") ///
+         ytitle("Treatment effect (ICW index)" " ", axis(2)) ///
+         coeflabels(B_P3 = `"Period 3"' ///
+             B_P4 = `"Period 4"' ///
+             B_P5 = `"Period 5"') ///
+         legend(cols(1) region(lcolor(white))) ///
+         ylabel(-60(30)60) ///
+         ylabel(-0.1(0.05)0.1, axis(2))
+ end
+ 
+ **********************
+ * Analysis functions *
+ **********************
+ 
+ program main
+     define_constants
+     define_plot_settings
+     import_data
+ 
+     survey_effects_rsi
+ end
+ 
+ program import_data
+     use "input/final_data_sample.dta", clear
+ end
+ 
+ program survey_effects_rsi
+     preserve
+ 
+     * Clean data
+     rename PD_*_UsageFITSBY UsageActual_*
+     rename S3_PredictUseNext_1_W UsagePredicted_P3
+     rename S3_PredictUseNext_2_W UsagePredicted_P4
+     rename S3_PredictUseNext_3_W UsagePredicted_P5
+ 
+     * Run regressions
+     foreach yvar in UsageActual {
+         foreach survey in P3 P4 P5 {
+             local baseline `yvar'_P1
+ 
+             gen_treatment, suffix(_`survey')
+             reg_treatment, yvar(`yvar'_`survey') suffix(_`survey') indep($STRATA `baseline')
+             est store `yvar'_`survey'
+         }
+     }
+ 
+     foreach yvar in UsagePredicted {
+         foreach survey in P3 P4 P5 {
+             local baseline UsageActual_P1
+ 
+             gen_treatment, suffix(_`survey')
+             reg_treatment, yvar(`yvar'_`survey') suffix(_`survey') indep($STRATA `baseline')
+             est store `yvar'_`survey'
+         }
+     }
+ 
+     * Plot regressions (by period)
+     coefplot (UsageActual*, label("Actual use") $COLOR_MAROON) ///
+         (UsagePredicted*, label("Predicted use") $COLOR_GRAY), ///
+         keep(B_*) ///
+         $COEFPLOT_SETTINGS_MINUTES
+ 
+     restore
+ 
+     graph export "output/habit_formation_fitsby.pdf", replace
+ end
+ 
+ ***********
+ * Execute *
+ ***********
+ 
+ main
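
Note: `gen_treatment` and `reg_treatment` come from the shared ado libraries on the adopath and are not part of this diff. Under the two-arm design these scripts use, a plausible stand-in for the actual-versus-predicted regressions above is sketched below; `B` and `L` stand for the Bonus and Limit assignment indicators and are assumptions about what the helpers generate, not their exact output:

* Illustrative stand-in only (assumed names): period-3 actual use regressed
* on assignment indicators, stratification controls, and baseline use.
reg UsageActual_P3 B L $STRATA UsageActual_P1, robust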
17/replication_package/code/analysis/treatment_effects/code/Heterogeneity.do ADDED
@@ -0,0 +1,963 @@
+ // Heterogeneity
+ 
+ ***************
+ * Environment *
+ ***************
+ 
+ clear all
+ adopath + "input/lib/ado"
+ adopath + "input/lib/stata/ado"
+ 
+ *********************
+ * Utility functions *
+ *********************
+ 
+ program define_constants
+     yaml read YAML using "input/config.yaml"
+     yaml global STRATA = YAML.metadata.strata
+ 
+     global app_list Facebook Instagram Twitter Snapchat Browser YouTube Other
+ end
+ 
+ program define_plot_settings
+     global CISPIKE_VERTICAL_GRAPHOPTS ///
+         ylabel(#6) ///
+         xsize(6.5) ysize(4.5) ///
+         legend(cols(3))
+ 
+     global CISPIKE_HORIZONTAL_GRAPHOPTS ///
+         xlabel(#6) ///
+         xsize(6.5) ysize(6.5)
+ 
+     global CISPIKE_STACKED_GRAPHOPTS ///
+         xcommon row(2) ///
+         graphregion(color(white)) ///
+         xsize(6.5) ysize(8)
+ 
+     global CISPIKE_SETTINGS ///
+         spikecolor(maroon black gray) ///
+         cicolor(maroon black gray)
+ 
+     global COLOR_MAROON ///
+         mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
+ 
+     global COLOR_LIGHT_RED ///
+         mcolor(maroon*0.7) ciopts(recast(rcap) lcolor(maroon*0.7))
+ 
+     global COLOR_DARK_RED ///
+         mcolor(maroon*1.3) ciopts(recast(rcap) lcolor(maroon*1.3))
+ 
+     global COLOR_LIGHT_GREY ///
+         mcolor(gray*0.8) ciopts(recast(rcap) lcolor(gray*0.8))
+ 
+     global COLOR_DARK_GREY ///
+         mcolor(gray*1.3) ciopts(recast(rcap) lcolor(gray*1.3))
+ 
+     global COLOR_DARK_GREEN ///
+         mcolor(teal) ciopts(recast(rcap) lcolor(teal))
+ 
+     global COLOR_LIGHT_GREEN ///
+         mcolor(eltgreen) ciopts(recast(rcap) lcolor(eltgreen))
+ 
+     global COLOR_BLACK ///
+         mcolor(black) ciopts(recast(rcap) lcolor(black))
+ 
+     global COLOR_GRAY ///
+         mcolor(gray) ciopts(recast(rcap) lcolor(gray))
+ 
+     global COEFPLOT_VERTICAL_SETTINGS ///
+         mcolor(maroon) ciopts(recast(rcap) lcolor(maroon)) ///
+         yline(0, lwidth(thin) lcolor(black)) ///
+         bgcolor(white) graphregion(color(white)) ///
+         legend(rows(1) region(lcolor(white))) ///
+         xsize(8) ysize(4) ///
+         ytitle("Treatment effect (minutes/day)" " ")
+ 
+     global COEFPLOT_HORIZONTAL_HTE_SETTINGS ///
+         xline(0, lwidth(thin) lcolor(black)) ///
+         bgcolor(white) graphregion(color(white)) grid(w) ///
+         legend(cols(1) region(lcolor(white))) ///
+         xsize(6.5) ysize(6.5)
+ 
+     global COEFPLOT_HORIZONTAL_MED_SETTINGS ///
+         xline(0, lwidth(thin) lcolor(black)) ///
+         bgcolor(white) graphregion(color(white)) grid(w) ///
+         legend(rows(1) region(lcolor(white))) ///
+         xsize(6.5) ysize(6.5)
+ 
+     global SMALL_LABELS ///
+         xlabel(, labsize(small)) ///
+         xtitle(, size(small)) ///
+         ylabel(, labsize(small)) ///
+         ytitle(, size(small)) ///
+         legend(size(small))
+ 
+     global COEF_SMALL_LABELS ///
+         coeflabels(, labsize(small)) ///
+         $SMALL_LABELS
+ end
+ 
+ **********************
+ * Analysis functions *
+ **********************
+ 
+ program main
+     define_constants
+     define_plot_settings
+     import_data
+ 
+     get_temptation_ranks
+     get_usage_ranks
+     plot_temptation
+     reg_usage_by_app
+     reg_usage_by_app_combined
+     plot_limit_tight_by_app
+     reg_usage_by_time
+     reg_usage_by_time, fitsby
+     reg_usage_by_time_scaled
+     reg_usage_by_time_scaled, fitsby
+     reg_usage_by_person
+     reg_usage_by_person_p3
+     reg_usage_by_person, fitsby
+     reg_usage_by_person_p3, fitsby
+     reg_iv_stacked_by_person
+     plot_wtp_motivation
+     plot_limit_wtp
+ end
+ 
+ program import_data
+     use "input/final_data_sample.dta", clear
+     rename S1_IdealApp_Messenger S1_IdealApp_Messaging
+ end
+ 
+ program reshape_ideal_use
+     * Reshape data
+     keep UserID S1_IdealApp_*
+     reshape long S1_IdealApp_, i(UserID) j(app) string
+ 
+     * Recode data
+     encode app, generate(app_encode)
+ 
+     recode S1_IdealApp_ ///
+         (1 = -75  ) ///
+         (2 = -37.5) ///
+         (3 = -12.5) ///
+         (4 = 0    ) ///
+         (5 = 12.5 ) ///
+         (6 = 37.5 ) ///
+         (7 = 75   ) ///
+         (8 = 0    ), ///
+         gen(S1_IdealApp_recode)
+ end
+ 
+ program get_usage_ranks
+     * Preserve data
+     preserve
+ 
+     * Reshape data
+     keep UserID PD_P1_Usage_*
+ 
+     drop PD_P1_Usage_H*
+ 
+     reshape long PD_P1_Usage_, i(UserID) j(app) s
+     replace PD_P1_Usage_ = 0 if PD_P1_Usage_ == .
+ 
+     * Get usage rankings
+     collapse (mean) PD_P1_Usage_, by(app)
+     drop if app == "Other"
+     gsort -PD_P1_Usage_
+     gen app_rank = _n
+ 
+     * Append Other last
+     set obs `=_N+1'
+     replace app = "Other" if app == ""
+     replace app_rank = `=_N' if app_rank == .
+     labmask app_rank, values(app)
+ 
+     * Categorize apps
+     gen category = 2
+     replace category = 1 if ///
+         inlist(app, "Facebook", "Instagram", "Twitter", "Snapchat", "Browser", "YouTube")
+ 
+     * Save data
+     keep app app_rank category
+     save "temp/app_rank_usage.dta", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program get_temptation_ranks
+     * Preserve data
+     preserve
+ 
+     * Reshape data
+     reshape_ideal_use
+ 
+     * Get temptation rankings
+     collapse (mean) S1_IdealApp_recode, by(app)
+     gsort +S1_IdealApp_recode
+     gen app_rank = _n
+ 
+     * Append Other last
+     set obs `=_N+1'
+     replace app = "Other" if app == ""
+     replace app_rank = `=_N' if app_rank == .
+     labmask app_rank, values(app)
+ 
+     * Categorize apps
+     gen category = 2
+     replace category = 1 if ///
+         inlist(app, "Facebook", "Instagram", "Twitter", "Snapchat", "Browser", "YouTube")
+ 
+     * Save data
+     keep app app_rank category
+     save "temp/app_rank.dta", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program gen_rank_labels
+     syntax, [prefix(str) suffix(str)]
+ 
+     * Preserve data
+     preserve
+ 
+     * Import ranks
+     use "temp/app_rank.dta", clear
+ 
+     global rank_labels ""
+     local N = _N
+ 
+     forvalues i = 1/`N' {
+         local app = app[`i']
+         global rank_labels "$rank_labels `prefix'`app'`suffix'"
+     }
+ 
+     * Restore data
+     restore
+ end
+ 
+ program gen_rank_labels_usage
+     syntax, [prefix(str) suffix(str)]
+ 
+     * Preserve data
+     preserve
+ 
+     * Import ranks
+     use "temp/app_rank_usage.dta", clear
+ 
+     global rank_labels_usage ""
+     local N = _N
+ 
+     forvalues i = 1/`N' {
+         local app = app[`i']
+         global rank_labels_usage "$rank_labels_usage `prefix'`app'`suffix'"
+     }
+ 
+     * Restore data
+     restore
+ end
+ 
+ program plot_temptation
+     * Preserve data
+     preserve
+ 
+     * Reshape data
+     reshape_ideal_use
+ 
+     * Merge in rankings
+     merge m:1 app using "temp/app_rank.dta", nogen assert(2 3) keep(3)
+ 
+     * Plot data (app categories together)
+     gen dummy = 1
+ 
+     //TODO: fix this bug
+     cispike S1_IdealApp_recode, ///
+         over1(dummy) over2(app_rank) ///
+         horizontal missing reverse $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_HORIZONTAL_GRAPHOPTS ///
+             ylabel(none, axis(2)) ///
+             xtitle(" " "Ideal use change (percent)") ///
+             legend(off) ///
+             $SMALL_LABELS)
+ 
+     graph export "output/overuse_by_app.pdf", replace
+ 
+     * Plot data (app categories separately)
+     cispike S1_IdealApp_recode if category == 1, ///
+         over1(dummy) over2(app_rank) over3(category) ///
+         horizontal missing reverse $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_HORIZONTAL_GRAPHOPTS ///
+             ylabel(none, axis(2)) ///
+             xtitle("") ///
+             legend(off) fysize(45))
+ 
+     graph save "output/overuse_by_app_fitsby.gph", replace
+ 
+     cispike S1_IdealApp_recode if category == 2, ///
+         over1(dummy) over2(app_rank) over3(category) ///
+         horizontal missing reverse $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_HORIZONTAL_GRAPHOPTS ///
+             ylabel(none, axis(2)) ///
+             xtitle(" " "Ideal use change (percent)") ///
+             legend(off))
+ 
+     graph save "output/overuse_by_app_non_fitsby.gph", replace
+ 
+     graph combine ///
+         "output/overuse_by_app_fitsby.gph" ///
+         "output/overuse_by_app_non_fitsby.gph", ///
+         $CISPIKE_STACKED_GRAPHOPTS
+ 
+     graph export "output/overuse_by_app_stacked.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program reg_usage_by_app
+     est clear
+ 
+     foreach app in $app_list {
+         * Specify regression
+         cap drop `app'
+         cap gen `app' = PD_P5432_Usage_`app'
+         label var `app' "`app'"
+         local yvar `app'
+         local baseline PD_P1_Usage_`app'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store Limit_Est_`yvar'
+     }
+ 
+     foreach app in $app_list {
+         * Specify regression
+         cap drop `app'
+         cap gen `app' = PD_P3_Usage_`app'
+         label var `app' "`app'"
+         local yvar `app'
+         local baseline PD_P1_Usage_`app'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store Bonus_Est_`yvar'
+     }
+ 
+     local app_list_bonus Facebook_B Instagram_B Twitter_B Snapchat_B Browser_B YouTube_B Other_B
+ 
+     * Plot regressions
+     gen_rank_labels_usage, prefix("")
+ 
+     coefplot (Bonus_Est_*, keep(B_*) label("Bonus") $COLOR_MAROON) ///
+         (Limit_Est_*, keep(L_*) label("Limit") $COLOR_GRAY), ///
+         rename(L_* = "" B_* = "") ///
+         order($rank_labels_usage) vertical ///
+         $COEFPLOT_VERTICAL_SETTINGS
+ 
+     graph export "output/coef_usage_by_app.pdf", replace
+ end
+ 
+ program reg_usage_by_app_combined
+     est clear
+ 
+     foreach app in $app_list {
+         * Specify regression
+         cap drop `app'
+         cap gen `app' = PD_P432_Usage_`app'
+         label var `app' "`app'"
+         local yvar `app'
+         local baseline PD_P1_Usage_`app'
+ 
+         * Run regression
+         gen_treatment_combined, suffix(_`yvar') var(`yvar')
+         reg_treatment_combined, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline')
+         est store `yvar'
+     }
+ 
+     * Plot regressions
+     gen_rank_labels, prefix("C_")
+ 
+     coefplot $app_list, ///
+         keep(C_*) order($rank_labels) vertical ///
+         nooffsets $COEFPLOT_VERTICAL_SETTINGS ///
+         legend(off)
+ 
+     graph export "output/coef_usage_by_app_combined.pdf", replace
+ end
+ 
+ program plot_limit_tight_by_app
+     * Preserve data
+     preserve
+ 
+     * Zero out app-level limit tightness where the overall measure is nonmissing
+     foreach time in P2 P3 P4 P5 P5432 P432 P43 {
+         foreach category in Facebook Instagram Twitter Snapchat Browser YouTube Other {
+             replace PD_`time'_LimitTight_`category' = 0 if PD_`time'_LimitTight != . & PD_`time'_LimitTight_`category' == .
+         }
+     }
+ 
+     * Reshape data
+     keep UserID *LimitTight_*
+ 
+     rename *LimitTight_* **
+     rename_but, varlist(UserID) prefix(limit)
+     reshape long limit, i(UserID) j(measure) string
+ 
+     split measure, p("_")
+     drop measure measure1
+     rename (measure2 measure3) (measure app)
+ 
+     * Recode data
+     encode measure, generate(measure_encode)
+ 
+     merge m:1 app using "temp/app_rank_usage.dta", nogen keep(3)
+ 
+     recode measure_encode ///
+         (1 = 1 "Period 2") ///
+         (2 = 2 "Period 3") ///
+         (3 = 3 "Period 4") ///
+         (6 = 4 "Period 5") ///
+         (4 = 5 "Periods 3 & 4") ///
+         (5 = 6 "Periods 2 to 4") ///
+         (7 = 7 "Periods 2 to 5"), ///
+         gen(measure_recode)
+ 
+     * Plot data (all periods together)
+     gen dummy = 1
+ 
+     cispike limit if measure_recode == 7, ///
+         over1(dummy) over2(app_rank) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
+             ytitle("Limit tightness (minutes/day)" " ") ///
+             legend(off))
+ 
+     graph export "output/cispike_limit_tight_combined_by_app.pdf", replace
+ 
+     * Plot data (by period)
+     cispike limit if measure_recode <= 3, ///
+         over1(measure_recode) over2(app_rank) ///
+         $CISPIKE_SETTINGS ///
+         graphopts($CISPIKE_VERTICAL_GRAPHOPTS ///
+             ytitle("Limit tightness (minutes/day)" " "))
+ 
+     graph export "output/cispike_limit_tight_by_app.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program reg_usage_by_time
+     syntax, [fitsby]
+ 
+     est clear
+ 
+     * Determine FITSBY restriction
+     if ("`fitsby'" == "fitsby") {
+         local fitsby "FITSBY"
+         local suffix "_fitsby"
+     }
+     else {
+         local fitsby ""
+         local suffix ""
+     }
+ 
+     foreach hour of num 1(2)23 {
+         * Specify regression
+         cap drop H_`hour'
+         gen H_`hour' = PD_P432_Usage`fitsby'_H`hour'
+         label var H_`hour' "`hour'"
+         local yvar H_`hour'
+         local baseline PD_P1_Usage`fitsby'_H`hour'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store L`yvar'
+ 
+         * Run Bonus regressions separately
+         cap drop H_`hour'
+         gen H_`hour' = PD_P3_Usage`fitsby'_H`hour'
+         label var H_`hour' "`hour'"
+         local yvar H_`hour'
+         local baseline PD_P1_Usage`fitsby'_H`hour'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store B`yvar'
+     }
+ 
+     * Plot regressions
+     coefplot (BH_*, keep(B_*) label("Bonus") $COLOR_MAROON) ///
+         (LH_*, keep(L_*) label("Limit") $COLOR_GRAY), ///
+         rename(L_* = "" B_* = "") vertical ///
+         xtitle(" " "Hour") ///
+         $COEFPLOT_VERTICAL_SETTINGS ///
+         ytitle("Treatment effect (minutes/hour)" " ")
+ 
+     graph export "output/coef_usage_by_time`suffix'.pdf", replace
+ 
+     * Preserve data
+     preserve
+ 
+     * Reshape data
+     keep PD_P1_Usage`fitsby'_H*
+     collapse (mean) PD_P1_Usage`fitsby'_H*
+     gen dummy = 1
+     reshape long PD_P1_Usage`fitsby'_H, i(dummy) j(hour)
+ 
+     * Recode data (convert two-hour bins to per-hour rates and bin indices)
+     replace PD_P1_Usage`fitsby'_H = PD_P1_Usage`fitsby'_H / 2
+     replace hour = (hour + 1) / 2
+ 
+     * Label data
+     foreach hour of num 1(2)23 {
+         gen H_`hour' = .
+         label var H_`hour' "`hour'"
+     }
+ 
+     * Plot regressions (with usage)
+ 
+     // Manually set labels and legends for double-axis figures
+     coefplot (BH_*, keep(B_*) label("Bonus") $COLOR_MAROON) ///
+         (LH_*, keep(L_*) label("Limit") $COLOR_BLACK), ///
+         rename(L_* = "" B_* = "") vertical ///
+         xtitle(" " "Hour") ///
+         $COEFPLOT_VERTICAL_SETTINGS ///
+         ytitle("Treatment effect (minutes/hour)" " ", axis(1)) ///
+         ytitle(" " "Usage (minutes/hour)", axis(2)) ///
+         ylabel(-4(2)4, axis(1)) yscale(range(-4, 4)) ///
+         ylabel(0(0.75)3, axis(2)) ///
+         yscale(alt) ///
+         addplot(bar PD_P1_Usage`fitsby'_H hour, ///
+             below yaxis(2) yscale(alt axis(2)) ///
+             color(gray%50) fintensity(100) barw(0.75))
+ 
+     graph export "output/coef_usage_by_time_usage`suffix'.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program reg_usage_by_time_scaled
+     syntax, [fitsby]
+ 
+     est clear
+ 
+     * Determine FITSBY restriction
+     if ("`fitsby'" == "fitsby") {
+         local fitsby "FITSBY"
+         local suffix "_fitsby"
+     }
+     else {
+         local fitsby ""
+         local suffix ""
+     }
+ 
+     * Preserve data
+     preserve
+ 
+     foreach hour of num 1(2)23 {
+         display(`hour')
+         * Normalize usage by the Control group mean
+         cap drop H_`hour'
+         sum PD_P432_Usage`fitsby'_H`hour' if S3_Bonus == 0 & S2_LimitType == 0
+         gen H_`hour' = PD_P432_Usage`fitsby'_H`hour' / r(mean)
+ 
+         * Specify regression
+         label var H_`hour' "`hour'"
+         local yvar H_`hour'
+         local baseline PD_P1_Usage`fitsby'_H`hour'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store L`yvar'
+ 
+         * Run Bonus regressions separately
+         cap drop H_`hour'
+         sum PD_P3_Usage`fitsby'_H`hour' if S3_Bonus == 0 & S2_LimitType == 0
+         gen H_`hour' = PD_P3_Usage`fitsby'_H`hour' / r(mean)
+ 
+         * Specify regression
+         label var H_`hour' "`hour'"
+         local yvar H_`hour'
+         local baseline PD_P1_Usage`fitsby'_H`hour'
+ 
+         * Run regression
+         gen_treatment, suffix(_`yvar') var(`yvar') simple
+         reg_treatment, yvar(`yvar') suffix(_`yvar') indep($STRATA `baseline') simple
+         est store B`yvar'
+     }
+ 
+     * Plot regressions
+     coefplot (BH_*, keep(B_*) label("Bonus") $COLOR_MAROON) ///
+         (LH_*, keep(L_*) label("Limit") $COLOR_GRAY), ///
+         rename(L_* = "" B_* = "") vertical ///
+         xtitle(" " "Hour") ///
+         $COEFPLOT_VERTICAL_SETTINGS ///
+         ytitle("Treatment effect" "(share of Control group usage)" " ")
+ 
+     graph export "output/coef_usage_by_time_scaled`suffix'.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program reg_usage_by_person
+     syntax, [fitsby]
+ 
+     est clear
+ 
+     * Determine FITSBY restriction
+     if ("`fitsby'" == "fitsby") {
+         local fitsby "FITSBY"
+         local suffix "_fitsby"
+     }
+     else {
+         local fitsby ""
+         local suffix ""
+     }
+ 
+     * Specify regressions
+     include "input/lib/stata/define_heterogeneity.do"
+ 
+     local label_E "Education"
+     local label_A "Age"
+     local label_G "Female"
+     local label_U "Baseline usage"
+     local label_R "Restriction index"
+     local label_L "Addiction index"
+ 
+     * Run regressions
+     foreach mod in /*I*/ E A G U R L {
+         foreach group in 0 1 {
+             foreach yvar in PD_P5432_Usage`fitsby' {
+                 local baseline PD_P1_Usage`fitsby'
+                 local if `mod'`group'
+ 
+                 gen_treatment, suffix(_`mod') simple
+                 label var L_`mod' "`label_`mod''"
+                 label var B_`mod' "`label_`mod''"
+                 reg_treatment, yvar(`yvar') suffix(_`mod') indep($STRATA `baseline') if(``if'') simple
+                 est store `yvar'_`if'
+             }
+         }
+     }
+ 
+     * Plot regressions
+     coefplot (*1, label("Above median") $COLOR_DARK_GREY) ///
+         (*0, label("Below median") $COLOR_LIGHT_GREY), ///
+         keep(L_*) ///
+         $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
+         xtitle(" " "Treatment effect (minutes/day)") ///
+         $COEF_SMALL_LABELS
+ 
+     graph export "output/coef_limit_usage_by_heterogeneity`suffix'.pdf", replace
+ 
+     coefplot (*1, label("Above median") $COLOR_DARK_RED) ///
+         (*0, label("Below median") $COLOR_LIGHT_RED), ///
+         keep(B_*) ///
+         $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
+         xtitle(" " "Treatment effect (minutes/day)") ///
+         $COEF_SMALL_LABELS
+ 
+     graph export "output/coef_bonus_usage_by_heterogeneity`suffix'.pdf", replace
+ end
+ 
+ program reg_usage_by_person_p3
+     syntax, [fitsby]
+ 
+     est clear
+ 
+     * Determine FITSBY restriction
+     if ("`fitsby'" == "fitsby") {
+         local fitsby "FITSBY"
+         local suffix "_fitsby"
+     }
+     else {
+         local fitsby ""
+         local suffix ""
+     }
+ 
+     * Specify regressions
+     include "input/lib/stata/define_heterogeneity.do"
+ 
+     local label_E "Education"
+     local label_A "Age"
+     local label_G "Female"
+     local label_U "Baseline usage"
+     local label_R "Restriction index"
+     local label_L "Addiction index"
+ 
+     * Run regressions
+     foreach mod in /*I*/ E A G U R L {
+         foreach group in 0 1 {
+             foreach yvar in PD_P3_Usage`fitsby' {
+                 local baseline PD_P1_Usage`fitsby'
+                 local if `mod'`group'
+ 
+                 gen_treatment, suffix(_`mod') simple
+                 label var L_`mod' "`label_`mod''"
+                 label var B_`mod' "`label_`mod''"
+                 reg_treatment, yvar(`yvar') suffix(_`mod') indep($STRATA `baseline') if(``if'') simple
+                 est store `yvar'_`if'
+             }
+         }
+     }
+ 
+     * Plot regressions
+     coefplot (*1, label("Above median") $COLOR_DARK_GREY) ///
+         (*0, label("Below median") $COLOR_LIGHT_GREY), ///
+         keep(L_*) ///
+         $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
+         xtitle(" " "Treatment effect (minutes/day)") ///
+         $COEF_SMALL_LABELS
+ 
+     graph export "output/coef_limit_usage_by_heterogeneity_P3`suffix'.pdf", replace
+ 
+     coefplot (*1, label("Above median") $COLOR_DARK_RED) ///
+         (*0, label("Below median") $COLOR_LIGHT_RED), ///
+         keep(B_*) ///
+         $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
+         xtitle(" " "Treatment effect (minutes/day)") ///
+         $COEF_SMALL_LABELS
+ 
+     graph export "output/coef_bonus_usage_by_heterogeneity_P3`suffix'.pdf", replace
+ end
+ 
+ program reshape_self_control_outcomes
+     * Reshape wide to long
+     gen S4_Usage_FITSBY = PD_P3_UsageFITSBY
+     gen S3_Usage_FITSBY = PD_P2_UsageFITSBY
+ 
+     keep UserID S3_Bonus S2_LimitType Stratifier ///
+         S1_Income S1_Education S0_Age S0_Gender ///
+         StratWantRestrictionIndex StratAddictionLifeIndex PD_P1_UsageFITSBY ///
+         S*_Usage_FITSBY ///
+         S*_PhoneUseChange_N ///
+         S*_AddictionIndex_N ///
+         S*_SMSIndex_N ///
+         S*_SWBIndex_N ///
+         S*_LifeBetter_N ///
+         S*_index_well_N
+ 
+     local indep UserID S3_Bonus S2_LimitType Stratifier S1_* S0_* Strat* PD_*
+     rename_but, varlist(`indep') prefix(outcome)
+     reshape long outcome, i(UserID) j(measure) string
+ 
+     split measure, p(_)
+     replace measure = measure2 + "_" + measure3 + "_" + measure4 if measure4 != ""
+     replace measure = measure2 + "_" + measure3 if measure4 == ""
+     rename measure1 survey
+     drop measure2 measure3 measure4
+ 
+     * Reshape long to wide
+     reshape wide outcome, i(UserID survey) j(measure) string
+     rename outcome* *
+ 
+     * Recode data
+     encode survey, gen(S)
+ 
+     * Label data
+     label var PhoneUseChange "Ideal use change"
+     label var AddictionIndex "Addiction scale x (-1)"
+     label var SMSIndex "SMS addiction scale x (-1)"
+     label var LifeBetter "Phone makes life better"
+     label var SWBIndex "Subjective well-being"
+     label var index_well "Survey index"
+ end
+ 
+ program reg_iv_stacked_by_person
+     est clear
+ 
+     * Preserve data
+     preserve
+ 
+     * Reshape data
+     reshape_self_control_outcomes
+ 
+     * Specify regression
+     local yvarset ///
+         PhoneUseChange_N ///
+         AddictionIndex_N ///
+         SMSIndex_N ///
+         LifeBetter_N ///
+         SWBIndex_N ///
+         index_well_N
+ 
+     include "input/lib/stata/define_heterogeneity.do"
+ 
+     * Run regressions
+     foreach if in /*I0 I1*/ E0 E1 A0 A1 G0 G1 U0 U1 R0 R1 L0 L1 {
+         foreach yvar in `yvarset' {
+             local baseline = "S1_`yvar'"
+ 
+             * Treatment indicators
+             gen_treatment, suffix(_`yvar') simple
+ 
+             * Specify regression
+             local indep i.S i.S#$STRATA i.S#c.`baseline'
+ 
+             * Run regression
+             gen_usage_stacked, yvar(`yvar') suffix(_`yvar') var(`yvar')
+             reg_usage_stacked, yvar(`yvar') suffix(_`yvar') indep(`indep') if(``if'')
+             est store U_`yvar'_`if'
+         }
+     }
+ 
+     * Plot regressions
+     foreach mod in /*I*/ E A G U R L {
+         local coef_plot0 ///
+             label("`label_`mod'0'") ///
+             mcolor(edkblue*0.7) ciopts(recast(rcap) lcolor(edkblue*0.7))
+ 
+         local coef_plot1 ///
+             label("`label_`mod'1'") ///
+             mcolor(edkblue*1.3) ciopts(recast(rcap) lcolor(edkblue*1.3))
+ 
+         coefplot (U_*_`mod'1, `coef_plot1') ///
+             (U_*_`mod'0, `coef_plot0'), ///
+             keep(U_*) ///
+             $COEFPLOT_HORIZONTAL_HTE_SETTINGS ///
+             xtitle(" " "Treatment effect" "(standard deviations per hour/day of use)") ///
+             $COEF_SMALL_LABELS
+ 
+         graph export "output/coef_iv_self_control_by_`suffix_`mod''.pdf", replace
+     }
+ 
+     * Restore data
+     restore
+ end
+ 
+ program plot_wtp_motivation
+     * Preserve data
+     preserve
+ 
+     * Specify groups
+     include "input/lib/stata/define_heterogeneity.do"
+ 
+     foreach mod in I E A G U R L {
+         foreach group in 0 1 {
+             gen Motivation_`mod'_`group' = S2_Motivation ``mod'`group''
+         }
+     }
+ 
+     * Reshape data
+     keep UserID Motivation_*
+     reshape long Motivation, i(UserID) j(measure) string
+     split measure, p("_")
+     drop measure measure1
+     rename measure2 measure
+     rename measure3 group
+ 
+     * Recode data
+     encode measure, generate(measure_encode)
+     encode group, generate(group_encode)
+ 
+     recode measure_encode ///
+         (2 = 1 "Education") ///
+         (1 = 2 "Age") ///
+         (3 = 3 "Female") ///
+         (7 = 4 "Baseline usage") ///
+         (5 = 5 "Restriction index") ///
+         (6 = 6 "Addiction index") ///
+         (4 = 7 "Income less than $50,000"), ///
+         gen(measure_recode)
+ 
+     recode group_encode ///
+         (1 = 2 "Below median") ///
+         (2 = 1 "Above median"), ///
+         gen(group_recode)
+ 
+     * Plot data (groups together)
+     cispike Motivation if measure_recode != 7, ///
+         over1(group_recode) over2(measure_recode) ///
+         horizontal reverse ///
+         spikecolor(maroon gray) ///
+         cicolor(maroon gray) ///
+         graphopts($CISPIKE_HORIZONTAL_GRAPHOPTS ///
+             xtitle(" " "Behavior change premium") ///
+             $SMALL_LABELS)
+ 
+     graph export "output/cispike_motivation_by_group.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ program plot_limit_wtp
+     * Preserve data
+     preserve
+ 
+     * Specify groups
+     include "input/lib/stata/define_heterogeneity.do"
+ 
+     foreach mod in I E A G U R L {
+         foreach group in 0 1 {
+             gen WTP_`mod'_`group' = S3_MPLLimit ``mod'`group''
+         }
+     }
+ 
+     * Reshape data
+     keep UserID WTP_*
+     reshape long WTP, i(UserID) j(measure) string
+     split measure, p("_")
+     drop measure measure1
+     rename measure2 measure
+     rename measure3 group
+ 
+     * Recode data
+     encode measure, generate(measure_encode)
+     encode group, generate(group_encode)
+ 
+     recode measure_encode ///
+         (2 = 1 "Education") ///
+         (1 = 2 "Age") ///
+         (3 = 3 "Female") ///
+         (7 = 4 "Baseline usage") ///
+         (5 = 5 "Restriction index") ///
+         (6 = 6 "Addiction index") ///
+         (4 = 7 "Income less than $50,000"), ///
+         gen(measure_recode)
+ 
+     recode group_encode ///
+         (1 = 2 "Below median") ///
+         (2 = 1 "Above median"), ///
+         gen(group_recode)
+ 
+     * Plot data (groups together)
+     cispike WTP if measure_recode != 7, ///
+         over1(group_recode) over2(measure_recode) ///
+         horizontal reverse ///
+         spikecolor(maroon gray) ///
+         cicolor(maroon gray) ///
+         graphopts($CISPIKE_HORIZONTAL_GRAPHOPTS ///
+             xtitle(" " "Willingness to pay for limit ($)") ///
+             $SMALL_LABELS)
+ 
+     graph export "output/cispike_limit_motivation_by_group.pdf", replace
+ 
+     * Restore data
+     restore
+ end
+ 
+ ***********
+ * Execute *
+ ***********
+ 
+ main
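
Note: the moderator conditions (`E0'/`E1', `U0'/`U1', and so on) and the `label_*'/`suffix_*' locals come from `input/lib/stata/define_heterogeneity.do`, which this diff does not include. A minimal sketch of the kind of median split it presumably defines, using baseline FITSBY usage as the example moderator; the construction is an assumption, not the library's exact code:

* Sketch only: a median split on baseline FITSBY usage.
quietly sum PD_P1_UsageFITSBY, detail
local med = r(p50)
local U0 "if PD_P1_UsageFITSBY <  `med'"    // below-median condition
local U1 "if PD_P1_UsageFITSBY >= `med'"    // above-median condition

Because the file is pulled in with `include` rather than `do`, locals defined this way remain visible in the calling program, which is why the loops above can expand them as ``if''.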
17/replication_package/code/analysis/treatment_effects/code/HeterogeneityInstrumental.do ADDED
@@ -0,0 +1,477 @@
1
+ // Response to commitment, moderated by demand for flexibility
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_constants
16
+ yaml read YAML using "input/config.yaml"
17
+ yaml global STRATA = YAML.metadata.strata
18
+ end
19
+
20
+ program define_plot_settings
21
+ global COEFPLOT_HORIZONTAL_SETTINGS ///
22
+ xline(0, lwidth(thin) lcolor(black)) ///
23
+ bgcolor(white) graphregion(color(white)) grid(w) ///
24
+ legend(cols(1) region(lcolor(white))) ///
25
+ xsize(6.5) ysize(6.5)
26
+
27
+ global COEFPLOT_HORIZONTAL_MED_SETTINGS ///
28
+ xline(0, lwidth(thin) lcolor(black)) ///
29
+ bgcolor(white) graphregion(color(white)) grid(w) ///
30
+ legend(rows(1) region(lcolor(white))) ///
31
+ xsize(6.5) ysize(6.5)
32
+
33
+ global SMALL_LABELS ///
34
+ xlabel(, labsize(small)) ///
35
+ xtitle(, size(small)) ///
36
+ ylabel(, labsize(small)) ///
37
+ ytitle(, size(small)) ///
38
+ legend(size(small))
39
+
40
+ global COEF_SMALL_LABELS ///
41
+ coeflabels(, labsize(small)) ///
42
+ $SMALL_LABELS
43
+
44
+ global COEFPLOT_SETTINGS_STD ///
45
+ xline(0, lwidth(thin) lcolor(black)) ///
46
+ bgcolor(white) graphregion(color(white)) grid(w) ///
47
+ legend(rows(1) region(lcolor(white))) ///
48
+ xsize(6.5) ysize(4.5) ///
49
+ xtitle(" " "Treatment effect (standard deviations per hour/day of use)")
50
+
51
+ global COLOR_MAROON ///
52
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
53
+
54
+ global COLOR_LIGHT_RED ///
55
+ mcolor(maroon*0.7) ciopts(recast(rcap) lcolor(maroon*0.7))
56
+
57
+ global COLOR_DARK_RED ///
58
+ mcolor(maroon*1.3) ciopts(recast(rcap) lcolor(maroon*1.3))
59
+
60
+ global COLOR_LIGHT_GREY ///
61
+ mcolor(gray*0.8) ciopts(recast(rcap) lcolor(gray*0.8))
62
+
63
+ global COLOR_DARK_GREY ///
64
+ mcolor(gray*1.3) ciopts(recast(rcap) lcolor(gray*1.3))
65
+
66
+ end
67
+
68
+ **********************
69
+ * Analysis functions *
70
+ **********************
71
+
72
+ program main
73
+ define_constants
74
+ define_plot_settings
75
+ import_data
76
+
77
+ reg_survey_heterogeneity
78
+ reg_iv_self_control_no_B3
79
+ reg_welfare_itt
80
+ reg_welfare_late
81
+ end
82
+
83
+ program import_data
84
+ use "input/final_data_sample.dta", clear
85
+ end
86
+
87
+ program gen_coefficient
88
+ syntax, var(str) suffix(str) label_var(str)
89
+
90
+ cap drop C`suffix'
91
+ gen C`suffix' = `var'
92
+
93
+ local vlabel: variable label `label_var'
94
+ label var C`suffix' "`vlabel'"
95
+ end
96
+
97
+ program reshape_self_control_outcomes_h
98
+ * Reshape wide to long
99
+ gen S4_Usage_FITSBY = PD_P3_UsageFITSBY
100
+ gen S3_Usage_FITSBY = PD_P2_UsageFITSBY
101
+
102
+ keep UserID S3_Bonus S2_LimitType Stratifier ///
103
+ S1_Income S1_Education S0_Age S0_Gender ///
104
+ StratWantRestrictionIndex StratAddictionLifeIndex PD_P1_UsageFITSBY ///
105
+ S*_Usage_FITSBY ///
106
+ S*_PhoneUseChange_N ///
107
+ S*_AddictionIndex_N ///
108
+ S*_SMSIndex_N ///
109
+ S*_SWBIndex_N ///
110
+ S*_LifeBetter_N ///
111
+ S*_index_well_N
112
+
113
+ local indep UserID S3_Bonus S2_LimitType Stratifier S1_* S0_* Strat* PD_*
114
+ rename_but, varlist(`indep') prefix(outcome)
115
+ reshape long outcome, i(UserID) j(measure) string
116
+
117
+ split measure, p(_)
118
+ replace measure = measure2 + "_" + measure3 + "_" + measure4 if measure4 != ""
119
+ replace measure = measure2 + "_" + measure3 if measure4 == ""
120
+ rename measure1 survey
121
+ drop measure2 measure3 measure4
122
+
123
+ * Reshape long to wide
124
+ reshape wide outcome, i(UserID survey) j(measure) string
125
+ rename outcome* *
126
+
127
+ * Recode data
128
+ encode survey, gen(S)
129
+
130
+ * Label data
131
+ label var PhoneUseChange "Ideal use change"
132
+ label var AddictionIndex "Addiction scale x (-1)"
133
+ label var SMSIndex "SMS addiction scale x (-1)"
134
+ label var LifeBetter "Phone makes life better"
135
+ label var SWBIndex "Subjective well-being"
136
+ label var index_well "Survey index"
137
+ end
138
+
139
+ program reshape_self_control_outcomes
140
+ * Reshape wide to long
141
+ gen S4_Usage_FITSBY = PD_P3_UsageFITSBY
142
+ gen S3_Usage_FITSBY = PD_P2_UsageFITSBY
143
+
144
+ keep UserID S3_Bonus S2_LimitType Stratifier ///
145
+ S*_Usage_FITSBY ///
146
+ S*_PhoneUseChange_N ///
147
+ S*_AddictionIndex_N ///
148
+ S*_SMSIndex_N ///
149
+ S*_SWBIndex_N ///
150
+ S*_LifeBetter_N ///
151
+ S*_index_well_N
152
+
153
+ local indep UserID S3_Bonus S2_LimitType Stratifier S1_*
154
+ rename_but, varlist(`indep') prefix(outcome)
155
+ reshape long outcome, i(`indep') j(measure) string
156
+
157
+ split measure, p(_)
158
+ replace measure = measure2 + "_" + measure3 + "_" + measure4 if measure4 != ""
159
+ replace measure = measure2 + "_" + measure3 if measure4 == ""
160
+ rename measure1 survey
161
+ drop measure2 measure3 measure4
162
+
163
+ * Reshape long to wide
164
+ reshape wide outcome, i(UserID survey) j(measure) string
165
+ rename outcome* *
166
+
167
+ * Recode data
168
+ encode survey, gen(S)
169
+
170
+ * Label data
171
+ label var PhoneUseChange "Ideal use change"
172
+ label var AddictionIndex "Addiction scale x (-1)"
173
+ label var SMSIndex "SMS addiction scale x (-1)"
174
+ label var LifeBetter "Phone makes life better"
175
+ label var SWBIndex "Subjective well-being"
176
+ label var index_well "Survey index"
177
+ end
178
+
179
+ program reg_usage_stacked_no_B3
180
+ syntax, yvar(str) [suffix(str) indep(str) if(str)]
181
+
182
+ cap drop i_S4
183
+ gen i_S4 = S - 1
184
+ gen B_`yvar'4 = i_S4 * B_`yvar'
185
+
186
+ ivregress 2sls `yvar' (U`suffix' = B_`yvar'4 i.S#L_`yvar') `indep' `if', robust
187
+ end
188
+
189
+ program reg_survey_heterogeneity
190
+ syntax
191
+
192
+ est clear
193
+
194
+ preserve
195
+ * Reshape data
196
+ reshape_self_control_outcomes_h
197
+
198
+ * Specify regression
199
+ local yvarset ///
200
+ PhoneUseChange_N ///
201
+ AddictionIndex_N ///
202
+ SMSIndex_N ///
203
+ LifeBetter_N ///
204
+ SWBIndex_N ///
205
+ index_well_N
206
+
207
+ include "input/lib/stata/define_heterogeneity.do"
208
+
209
+
210
+ * Run regressions
211
+ foreach if in R0 R1 L0 L1 U0 U1 {
212
+ foreach yvar in `yvarset' {
213
+ local baseline = "S1_`yvar'"
214
+
215
+ * Treatment indicators
216
+ gen_treatment, suffix(_`yvar') simple
217
+ cap drop B3_`yvar'
218
+ cap drop B4_`yvar'
219
+ gen B3_`yvar' = B_`yvar' * (S == 1)
220
+ gen B4_`yvar' = B_`yvar' * (S == 2)
221
+
222
+ * Specify regression
223
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
224
+
225
+ * Limit
226
+ gen_coefficient, var(L_`yvar') suffix(_`yvar') label_var(`yvar')
227
+ reg `yvar' C_`yvar' B3_`yvar' B4_`yvar' `indep' ``if'', robust cluster(UserID)
228
+ est store L_`yvar'_`if'
229
+
230
+ * Bonus
231
+ gen_coefficient, var(B4_`yvar') suffix(_`yvar') label_var(`yvar')
232
+ reg `yvar' L_`yvar' B3_`yvar' C_`yvar' `indep' ``if'', robust cluster(UserID)
233
+ est store B_`yvar'_`if'
234
+ }
235
+ }
236
+
237
+ * Plot regressions
238
+ foreach mod in R L U {
239
+ local coef_plot0 ///
240
+ label("`label_`mod'0'") ///
241
+ mcolor(gray) ciopts(recast(rcap) lcolor(gray))
242
+
243
+ local coef_plot1 ///
244
+ label("`label_`mod'1'") ///
245
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
246
+
247
+ coefplot (L_*_`mod'1, `coef_plot1') ///
248
+ (L_*_`mod'0, `coef_plot0'), ///
249
+ keep(C_*) ///
250
+ $COEFPLOT_HORIZONTAL_SETTINGS ///
251
+ xtitle(" " "Treatment effect" "(standard deviations)") ///
252
+ $COEF_SMALL_LABELS
253
+
254
+ graph export "output/coef_limit_itt_by_`suffix_`mod''.pdf", replace
255
+
256
+ coefplot (B_*_`mod'1, `coef_plot1') ///
257
+ (B_*_`mod'0, `coef_plot0'), ///
258
+ keep(C_*) ///
259
+ $COEFPLOT_HORIZONTAL_SETTINGS ///
260
+ xtitle(" " "Treatment effect" "(standard deviations)") ///
261
+ $COEF_SMALL_LABELS
262
+
263
+ graph export "output/coef_bonus_itt_by_`suffix_`mod''.pdf", replace
264
+ }
265
+
266
+ * Restore data
267
+ restore
268
+
269
+ end
270
+
271
+ program reg_iv_self_control_no_B3
272
+ est clear
273
+
274
+ * Preserve data
275
+ preserve
276
+
277
+ * Reshape data
278
+ reshape_self_control_outcomes
279
+
280
+ * Specify regression
281
+ local yvarset ///
282
+ PhoneUseChange_N ///
283
+ AddictionIndex_N ///
284
+ SMSIndex_N ///
285
+ LifeBetter_N ///
286
+ SWBIndex_N ///
287
+ index_well_N
288
+
289
+ * Run regressions
290
+ foreach yvar in `yvarset' {
291
+ local baseline = "S1_`yvar'"
292
+
293
+ * Treatment indicators
294
+ gen_treatment, suffix(_`yvar') simple
295
+
296
+ * Specify regression
297
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
298
+
299
+ * Run regression
300
+ gen_usage_stacked, yvar(`yvar') suffix(_`yvar') var(`yvar')
301
+ reg_usage_stacked_no_B3, yvar(`yvar') suffix(_`yvar') indep(`indep')
302
+ est store U_`yvar'
303
+ }
304
+
305
+ * Plot regressions
306
+ coefplot (U_*, $COLOR_MAROON), ///
307
+ keep(U_*) ///
308
+ $COEFPLOT_SETTINGS_STD ///
309
+ legend(off)
310
+
311
+ graph export "output/coef_iv_self_control_no_B3.pdf", replace
312
+
313
+ * Restore data
314
+ restore
315
+ end
316
+
317
+ program reg_welfare_itt
318
+ est clear
319
+
320
+ preserve
321
+ * Reshape data
322
+ reshape_self_control_outcomes_h
323
+
324
+ * Specify regression
325
+ local yvar index_well_N
326
+
327
+ include "input/lib/stata/define_heterogeneity.do"
328
+
329
+ local label_E "Education"
330
+ local label_A "Age"
331
+ local label_G "Female"
332
+ local label_U "Baseline usage"
333
+ local label_R "Restriction index"
334
+ local label_L "Addiction index"
335
+
336
+ gen MG_Indicator = 0 if S0_Gender == 1
337
+ replace MG_Indicator = 1 if S0_Gender == 2
338
+
339
+ local baseline = "S1_`yvar'"
340
+
341
+ * Treatment indicators
342
+ gen_treatment, simple
343
+ cap drop B3_`yvar'
344
+ cap drop B4_`yvar'
345
+ gen B3 = B * (S == 1)
346
+ gen B4 = B * (S == 2)
347
+
348
+ * Specify regression
349
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
350
+
351
+ * Run regressions
352
+ foreach group in E A G U R L {
353
+ foreach s in 0 1 {
354
+
355
+ local if "if M`group'_Indicator == `s'"
356
+ * Limit
357
+ cap drop C_`group'
358
+ gen C_`group' = L
359
+ label var C_`group' "`label_`group''"
360
+ reg `yvar' C_`group' B3 B4 `indep' ``if'', robust cluster(UserID)
361
+ est store L_`group'`s'
362
+
363
+ * Bonus
364
+ cap drop C_`group'
365
+ gen C_`group' = B4
366
+ label var C_`group' "`label_`group''"
367
+ reg `yvar' L B3 C_`group' `indep' ``if'', robust cluster(UserID)
368
+ est store B_`group'`s'
369
+ }
370
+ }
371
+
372
+ * Plot regressions
373
+
374
+ local coef_plot0 ///
375
+ label("Below median")
376
+
377
+ local coef_plot1 ///
378
+ label("Above median")
379
+
380
+ coefplot (L*1, `coef_plot1' $COLOR_DARK_GREY) ///
381
+ (L*0, `coef_plot0' $COLOR_LIGHT_GREY), ///
382
+ keep(C_*) ///
383
+ $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
384
+ xtitle(" " "Treatment effect" "(standard deviations)") ///
385
+ $COEF_SMALL_LABELS
386
+
387
+ graph export "output/coef_heterogenous_limit_itt_welfare.pdf", replace
388
+
389
+ coefplot (B*1, `coef_plot1' $COLOR_DARK_RED) ///
390
+ (B*0, `coef_plot0' $COLOR_LIGHT_RED), ///
391
+ keep(C_*) ///
392
+ $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
393
+ xtitle(" " "Treatment effect" "(standard deviations)") ///
394
+ $COEF_SMALL_LABELS
395
+
396
+ graph export "output/coef_heterogenous_bonus_itt_welfare.pdf", replace
397
+
398
+ * Restore data
399
+ restore
400
+ end
401
+
402
+ program reg_welfare_late
403
+ est clear
404
+
405
+ preserve
406
+ * Reshape data
407
+ reshape_self_control_outcomes_h
408
+
409
+ * Specify regression
410
+ local yvar index_well_N
411
+
412
+ include "input/lib/stata/define_heterogeneity.do"
413
+
414
+ local label_E "Education"
415
+ local label_A "Age"
416
+ local label_G "Female"
417
+ local label_U "Baseline usage"
418
+ local label_R "Restriction index"
419
+ local label_L "Addiction index"
420
+
421
+ gen MG_Indicator = 0 if S0_Gender == 1
422
+ replace MG_Indicator = 1 if S0_Gender == 2
423
+
424
+
425
+ local baseline = "S1_`yvar'"
426
+
427
+ * Specify regression
428
+ local indep i.S i.S#$STRATA i.S#c.`baseline'
429
+
430
+ * Run regressions
431
+ foreach group in E A G U R L {
432
+ foreach s in 0 1 {
433
+
434
+ local if "if M`group'_Indicator == `s'"
435
+
436
+ * Create usage variable (make negative per issue 184 comments)
437
+ cap drop U_`group'
438
+ gen U_`group' = -1 * Usage_FITSBY
439
+
440
+ * Convert usage from minutes/day to hours/day
441
+ replace U_`group' = U_`group'/60
442
+ label var U_`group' "`label_`group''"
443
+
444
+ * Run regression
445
+ gen_treatment, suffix(_`yvar') simple
446
+ reg_usage_stacked, yvar(`yvar') suffix(_`group') indep(`indep') if(`if')
447
+
448
+ est store U_`group'`s'
449
+ }
450
+ }
451
+
452
+ * Plot regressions
453
+
454
+ local coef_plot0 ///
455
+ label("Below median") ///
456
+ mcolor(gray) ciopts(recast(rcap) lcolor(gray))
457
+
458
+ local coef_plot1 ///
459
+ label("Above median") ///
460
+ mcolor(maroon) ciopts(recast(rcap) lcolor(maroon))
461
+
462
+ coefplot (U*1, `coef_plot1') ///
463
+ (U*0, `coef_plot0'), ///
464
+ keep(U_*) ///
465
+ $COEFPLOT_HORIZONTAL_MED_SETTINGS ///
466
+ xtitle(" " "Treatment effect" "(standard deviations per hour/day of use)") ///
467
+ $COEF_SMALL_LABELS
468
+
469
+ graph export "output/coef_heterogenous_late_welfare.pdf", replace
470
+
471
+ end
472
+
473
+ ***********
474
+ * Execute *
475
+ ***********
476
+
477
+ main
17/replication_package/code/analysis/treatment_effects/code/ModelHeterogeneity.R ADDED
@@ -0,0 +1,1406 @@
1
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2
+ # Setup
3
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ # Import plotting functions and constants from lib file
6
+ source('input/lib/r/ModelFunctions.R')
7
+ p_B <- (hourly_rate / num_days) / 60
8
+ F_B <- (hourly_rate * max_hours) / num_days
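+ # Assuming hourly_rate, num_days, and max_hours are defined in ModelFunctions.R:
+ # p_B is the bonus price per minute/day of use; F_B is the per-day bonus budget at the usage cap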
9
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10
+ # Helper Functions
11
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12
+ # Returns the quantile bin (deciles by default; coarser or finer bins via `step`) that each value of `x` falls in
13
+ add_deciles <- function(x, step=0.1){
14
+ decile <- cut(x,
15
+ breaks=quantile(x,
16
+ probs=seq(0,1,by=step),
17
+ na.rm = TRUE),
18
+ include.lowest=TRUE,
19
+ labels=FALSE)
20
+ return(decile)
21
+ }
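+ # Example (hypothetical data): add_deciles(runif(1000)) labels each value 1-10;
+ # add_deciles(x, step = 0.25) would return quartile bins 1-4 instead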
22
+
23
+ # Estimates tau (the limit treatment effect) within each decile bin via decile-specific L dummies, then plots the taus against bin means
24
+ plot_tau <- function(df, tau_data, decile_name, variable_name, xlabel, eq='usage ~ PD_P1_UsageFITSBY + B + S', filename){
25
+
26
+ tau_data$decile <- tau_data[[decile_name]]
27
+ df$decile <- df[[decile_name]]
28
+ df$amount_var <- df[[variable_name]]
29
+
30
+ taus <- c()
31
+ deciles <- sort(unique(tau_data$decile))
32
+
33
+ formula <- eq
34
+ for (dec_idx in deciles){
35
+ var <- paste('L', dec_idx, sep="")
36
+ tau_data[[var]] <- ifelse(is.na(tau_data$decile),
37
+ 0,
38
+ ifelse(tau_data$decile == dec_idx,
39
+ tau_data$L,
40
+ 0))
41
+ formula <- paste(paste(formula, '+'), var)
42
+ }
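+ # Each L<d> built above equals L * 1{decile == d}, so its coefficient is the
+ # limit treatment effect (tau) estimated within decile d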
43
+ fit <- lm(data = tau_data,
44
+ formula = formula)
45
+
46
+ print(formula)
47
+
48
+ for (dec_idx in deciles){
49
+ var <- paste('L', dec_idx, sep="")
50
+ taus <- c(taus, as.numeric(fit$coefficients[var]))
51
+ }
52
+
53
+ decile_amnts <- df %>%
54
+ group_by(decile) %>%
55
+ summarize(amount = mean(amount_var), .groups = "drop")
56
+
57
+ plot_tau <- data.frame(taus)
58
+ plot_tau$decile <- deciles
59
+
60
+ # left_join() is already a left join; dplyr joins take no 'how' argument
+ plot_tau %<>% left_join(decile_amnts,
+ by = "decile")
63
+
64
+ print(plot_tau)
65
+
66
+ a <- ggplot(plot_tau, aes(x=amount, y=taus)) +
67
+ geom_point(color=maroon) +
68
+ theme_classic() +
69
+ geom_smooth(method = "lm",
70
+ formula = "y ~ x",
71
+ se = FALSE,
72
+ color="black",
73
+ size=0.6) +
74
+ labs(x = xlabel,
75
+ y = "Tau L")
76
+
77
+ ggsave(sprintf('output/%s.pdf', filename), plot=a, width=6.5, height=4.5, units="in")
78
+ }
79
+
80
+ # Binscatter (adapted from zkashner): plots decile means of a value variable against decile means of an amount variable
81
+ plot_value <- function(df, decile_name, variable_name, variable_amount, xlabel, ylabel, filename){
82
+
83
+ df$decile <- df[[decile_name]]
84
+ df$value_var <- df[[variable_name]]
85
+ df$amount_var <- df[[variable_amount]]
86
+
87
+ values <- c()
88
+ deciles <- unique(df$decile)
89
+
90
+ for (dec_idx in deciles){
91
+ subset <- df$decile == dec_idx
92
+ value <- mean(df[subset,]$value_var, na.rm = T) / num_days
93
+ values <- c(values, value)
94
+ }
95
+
96
+ decile_amnts <- df %>%
97
+ group_by(decile) %>%
98
+ summarize(amount = mean(amount_var), .groups = "drop")
99
+
100
+ plot_value <- data.frame(values)
101
+ plot_value$decile <- deciles
102
+
103
+ # base::merge() takes no 'how' argument; all.x = TRUE gives the intended left join
+ plot_value %<>% merge(decile_amnts,
+ by = "decile",
+ all.x = TRUE)
106
+
107
+ print(plot_value)
108
+
109
+ a <- ggplot(plot_value, aes(x=amount, y=values)) +
110
+ geom_point(color=maroon) +
111
+ theme_classic() +
112
+ geom_smooth(method = "lm",
113
+ formula = "y ~ x",
114
+ se = FALSE,
115
+ color="black",
116
+ size=0.6) +
117
+ labs(x = xlabel,
118
+ y = ylabel)
119
+
120
+ ggsave(sprintf('output/%s.pdf', filename), plot=a, width=6.5, height=4.5, units="in")
121
+ }
122
+
123
+ reshape_tau_data <- function(df){
124
+ tau_data <- df %>%
125
+ select(
126
+ UserID,
127
+ w,
128
+ L,
129
+ B,
130
+ S,
131
+ addiction_decile,
132
+ restriction_decile,
133
+ tightness_decile,
134
+ PD_P1_UsageFITSBY,
135
+ PD_P2_UsageFITSBY,
136
+ PD_P3_UsageFITSBY,
137
+ PD_P4_UsageFITSBY,
138
+ PD_P5_UsageFITSBY
139
+ )
140
+
141
+ tau_data %<>%
142
+ gather(
143
+ key = 'period',
144
+ value = 'usage',
145
+ -UserID,
146
+ -w,
147
+ -L,
148
+ -B,
149
+ -S,
150
+ -PD_P1_UsageFITSBY,
151
+ -addiction_decile,
152
+ -restriction_decile,
153
+ -tightness_decile
154
+ )
155
+
156
+ return(tau_data)
157
+ }
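+ # The gather() stacks PD_P2-PD_P5 usage into long form (one row per user x period),
+ # keeping PD_P1 wide as the baseline control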
158
+
159
+ reshape_tightness <- function(df){
160
+
161
+ pt1_usage <- df %>%
162
+ select(
163
+ UserID,
164
+ paste('PD_DailyUsage_', 1:10, sep="")) %>%
165
+ gather(
166
+ key = 'period',
167
+ value = 'usage',
168
+ -UserID,
169
+ ) %>%
170
+ group_by(UserID) %>%
171
+ summarize(PD_P1_PT1_UsageFITSBY = mean(usage, na.rm=TRUE), .groups = "drop")
172
+
173
+ pt2_usage <- df %>%
174
+ select(
175
+ UserID,
176
+ paste('PD_DailyUsage_', 11:20, sep="")) %>%
177
+ gather(
178
+ key = 'period',
179
+ value = 'usage',
180
+ -UserID,
181
+ ) %>%
182
+ group_by(UserID) %>%
183
+ summarize(PD_P1_PT2_UsageFITSBY = mean(usage, na.rm=TRUE), .groups = "drop")
184
+
185
+ tightness_df <- df %>%
186
+ select(
187
+ UserID,
188
+ w, L, B, S,
189
+ tightness_decile) %>%
190
+ merge(pt1_usage, by = "UserID", all.x = TRUE) %>%
191
+ merge(pt2_usage, by = "UserID", all.x = TRUE)
192
+
193
+ return(tightness_df)
194
+ }
195
+
196
+ reshape_mispredict <- function(df){
197
+ mpd_df <- df %>%
198
+ mutate(Mispredict_P2_S2 = PD_P2_UsageFITSBY - S2_PredictUseNext_1_W) %>%
199
+ mutate(Mispredict_P3_S2 = PD_P3_UsageFITSBY - S2_PredictUseNext_2_W) %>%
200
+ mutate(Mispredict_P4_S2 = PD_P4_UsageFITSBY - S2_PredictUseNext_3_W) %>%
201
+ mutate(Mispredict_S2 = (Mispredict_P2_S2 + Mispredict_P3_S2 + Mispredict_P4_S2)/3) %>%
202
+ mutate(mispredict_decile = add_deciles(Mispredict_P2_S2)) %>%
203
+ mutate(Mispredict_P3_S3 = PD_P3_UsageFITSBY - S3_PredictUseNext_1_W) %>%
204
+ mutate(Mispredict_P4_S3 = PD_P4_UsageFITSBY - S3_PredictUseNext_2_W) %>%
205
+ mutate(Mispredict_P5_S3 = PD_P5_UsageFITSBY - S3_PredictUseNext_3_W) %>%
206
+ mutate(Mispredict_S3 = (Mispredict_P3_S3 + Mispredict_P4_S3 + Mispredict_P5_S3)/3) %>%
207
+ mutate(Mispredict_P4_S4 = PD_P4_UsageFITSBY - S4_PredictUseNext_1_W) %>%
208
+ mutate(Mispredict_P5_S4 = PD_P5_UsageFITSBY - S4_PredictUseNext_2_W) %>%
209
+ mutate(Mispredict_S4 = (Mispredict_P4_S4 + Mispredict_P5_S4)/2) %>%
210
+ mutate(Mispredict_S34 = (3*Mispredict_S3 + 2*Mispredict_S4)/5) %>% #reweight
211
+ select(UserID, w, mispredict_decile, Mispredict_P2_S2, Mispredict_S2, Mispredict_S3, Mispredict_S4, Mispredict_S34)
212
+
213
+ return(mpd_df)
214
+ }
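+ # Mispredict_S34 weights S3 and S4 by the number of periods each average covers (3 vs 2)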
215
+
216
+ plot_taus <- function(df, tau_data, tightness_df){
217
+ plot_tau(df,
218
+ tau_data,
219
+ decile_name = 'addiction_decile',
220
+ variable_name = 'StratAddictionLifeIndex',
221
+ xlabel = "Addiction Index",
222
+ filename = "binscatter_heterogeneity_tau_addiction")
223
+
224
+ plot_tau(df,
225
+ tau_data,
226
+ decile_name = 'restriction_decile',
227
+ variable_name = 'StratWantRestrictionIndex',
228
+ xlabel = "Restriction Index",
229
+ filename = "binscatter_heterogeneity_tau_restriction")
230
+
231
+ plot_tau(df,
232
+ tau_data,
233
+ decile_name = 'tightness_decile',
234
+ variable_name = 'PD_P2_LimitTightFITSBY',
235
+ xlabel = "Limit Tightness",
236
+ filename = "binscatter_heterogeneity_tau_tightness")
237
+
238
+ plot_tau(df,
239
+ tightness_df,
240
+ decile_name = 'tightness_decile',
241
+ variable_name = 'PD_P2_LimitTightFITSBY',
242
+ xlabel = "Limit Tightness",
243
+ eq = 'PD_P1_PT2_UsageFITSBY ~ PD_P1_PT1_UsageFITSBY + B + S',
244
+ filename = "binscatter_heterogeneity_tau_tightness_placebo")
245
+ }
246
+
247
+ plot_valuations <- function(df){
248
+ vars <- c('behavioral_change_premium', 'S3_MPLLimit')
249
+ names <- c('Behavioral Change Premium', 'Limit Valuation')
250
+ file_exts <- c('behavioral_change_premium', 'v_L')
251
+
252
+ for (i in 1:2){
253
+ var_name <- vars[i]
254
+ ylabel <- names[i]
255
+ file_ext <- file_exts[i]
256
+
257
+ plot_value(df,
258
+ decile_name = "addiction_decile",
259
+ variable_name = var_name,
260
+ variable_amount = "StratAddictionLifeIndex",
261
+ xlabel = "Addiction Index",
262
+ ylabel = ylabel,
263
+ filename = sprintf("binscatter_heterogeneity_%s_addiction", file_ext))
264
+
265
+ plot_value(df,
266
+ decile_name = "restriction_decile",
267
+ variable_name = var_name,
268
+ variable_amount = "StratWantRestrictionIndex",
269
+ xlabel = "Restriction Index",
270
+ ylabel = ylabel,
271
+ filename = sprintf("binscatter_heterogeneity_%s_restriction", file_ext))
272
+
273
+ plot_value(df,
274
+ decile_name = "tightness_decile",
275
+ variable_name = var_name,
276
+ variable_amount = "PD_P2_LimitTightFITSBY",
277
+ xlabel = "Limit Tightness",
278
+ ylabel = ylabel,
279
+ filename = sprintf("binscatter_heterogeneity_%s_tightness", file_ext))
280
+ }
281
+ }
282
+
283
+ plot_mispredict <- function(mpd_df){
284
+ plot_value(mpd_df,
285
+ decile_name = "mispredict_decile",
286
+ variable_name = "Mispredict_S34",
287
+ variable_amount = "Mispredict_P2_S2",
288
+ xlabel = "Survey 2 Misprediction (minutes/day)",
289
+ ylabel = "Surveys 3 and 4 Misprediction (minutes/day)",
290
+ filename = "binscatter_heterogeneity_misprediction")
291
+ }
292
+
293
+ find_tau_spec <- function(df){
294
+
295
+ days_beg <- 1:10
296
+ days_end <- 11:20
297
+
298
+ tau_data <- df %>%
299
+ mutate(tightness=ifelse(L,PD_P2_LimitTightFITSBY, 0)) %>%
300
+ mutate(PD_P1beg_Usage_FITSBY =
301
+ rowSums(.[paste0('PD_DailyUsageFITSBY_',days_beg)], na.rm=TRUE)/length(days_beg),
302
+ PD_P1end_Usage_FITSBY =
303
+ rowSums(.[paste0('PD_DailyUsageFITSBY_',days_end)], na.rm=TRUE)/length(days_end)) %>%
304
+ select(
305
+ UserID,
306
+ w, L, B, S,
307
+ PD_P1_UsageFITSBY,
308
+ PD_P1beg_Usage_FITSBY,
309
+ PD_P1end_Usage_FITSBY,
310
+ PD_P2_UsageFITSBY,
311
+ PD_P3_UsageFITSBY,
312
+ PD_P4_UsageFITSBY,
313
+ PD_P5_UsageFITSBY,
314
+ PD_P2_LimitTightFITSBY,
315
+ tightness
316
+ )
317
+
318
+
319
+ fit_1 <-lm('PD_P1end_Usage_FITSBY ~ B + L + tightness + PD_P1beg_Usage_FITSBY + S',
320
+ data= tau_data, weights = w)
321
+
322
+ cluster_se1 <- as.vector(summary(fit_1,cluster = c("UserID"))$coefficients[,"Std. Error"])
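+ # Note: stats::summary.lm() silently ignores the 'cluster' argument, so these are
+ # conventional OLS standard errors; clustering would require e.g. estimatr::lm_robust()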
323
+
324
+ # stargazer (below) writes the combined regression table as LaTeX to output/heterogeneity_reg.tex
325
+
326
+ fit_2 <- lm('PD_P2_UsageFITSBY ~ B + L + tightness + PD_P1_UsageFITSBY+ S',
327
+ data= tau_data, weights = w)
328
+
329
+ cluster_se2 <- as.vector(summary(fit_2,cluster = c("UserID"))$coefficients[,"Std. Error"])
330
+
331
+
332
+ fit_3 <- lm('PD_P3_UsageFITSBY ~ B + L + tightness + PD_P1_UsageFITSBY + S',
333
+ data=tau_data,weights = w)
334
+
335
+ cluster_se3 <- as.vector(summary(fit_3,cluster = c("UserID"))$coefficients[,"Std. Error"])
336
+
337
+
338
+ stargazer(fit_1, fit_2, fit_3,
339
+ omit.stat = c("adj.rsq","f","ser"),
340
+ se = list(cluster_se1, cluster_se2, cluster_se3),
341
+ covariate.labels = c("Bonus treatment", "Limit treatment",
342
+ "Limit treatment $\\times$ period 2 limit tightness",
343
+ "1st half of period 1 FITSBY use", "Period 1 FITSBY use"),
344
+ align = TRUE,
345
+ dep.var.labels.include = FALSE,
346
+ column.labels = c('\\shortstack{2nd half of period 1 \\\\ FITSBY use}',
347
+ '\\shortstack{Period 2 \\\\ FITSBY use}',
348
+ '\\shortstack{Period 3 \\\\ FITSBY use}'),
349
+ title = "",
350
+ omit = c("Intercept", "S1", "S2", "S3", "S4",
351
+ "S5", "S6", "S7", "S8", "Constant"),
352
+ type = "latex",
353
+ omit.table.layout = "n",
354
+ float = FALSE,
355
+ dep.var.caption = "",
356
+ star.cutoffs = NA,
357
+ out = "output/heterogeneity_reg.tex"
358
+ )
359
+
360
+ return()
361
+ }
362
+
363
+
364
+
365
+
366
+ plot_weekly_effects <- function(df, filename1, filename2){
367
+ get_df <- function(df){
368
+ bonus_coefs <- c()
369
+ limit_coefs <- c()
370
+ bonus_lower <- c()
371
+ bonus_upper <- c()
372
+ limit_upper<- c()
373
+ limit_lower<- c()
374
+
375
+ for (t in 4:15){
376
+ dep_var <- sprintf('PD_WeeklyUsageFITSBY_%s', t)
377
+ eq <- paste0(dep_var, '~ PD_WeeklyUsageFITSBY_3 + L + B + S')
378
+
379
+ # Run regression
380
+ fit <- lm(data = df,
381
+ formula = eq,
382
+ weights = w)
383
+
384
+
385
+
386
+ bonus_coefs <- c(bonus_coefs, summary(fit)$coefficients[4,1])
387
+ limit_coefs <- c(limit_coefs, summary(fit)$coefficients[3,1])
388
+ bonus_lower <- c(bonus_lower, summary(fit, cluster= c("UserID"))$coefficients[4,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
389
+ bonus_upper <- c(bonus_upper, summary(fit, cluster= c("UserID"))$coefficients[4,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
390
+ limit_upper<- c(limit_upper, summary(fit, cluster= c("UserID"))$coefficients[3,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
391
+ limit_lower<- c(limit_lower, summary(fit, cluster= c("UserID"))$coefficients[3,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
392
+
393
+ }
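+ # Coefficient rows 3 and 4 index L and B (after the intercept and lagged usage);
+ # the bounds are pointwise 95% intervals (estimate +/- 1.96 x clustered SE)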
394
+
395
+ weeklydataframe <- as.data.frame(cbind(bonus_coefs, limit_coefs, bonus_lower,
396
+ bonus_upper, limit_lower, limit_upper ))
397
+
398
+
399
+ names(weeklydataframe) <- c("bonus_coefs", "limit_coefs", "bonus_lower",
400
+ "bonus_upper", "limit_lower", "limit_upper")
401
+
402
+
403
+ return(weeklydataframe)
404
+ }
405
+
406
+
407
+ df_weekly <- get_df(df)
408
+
409
+ x <- c('4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15')
410
+ names <- factor(x, levels=x)
411
+
412
+ weeklydf <- data.frame(names, df_weekly)
413
+
414
+ b <- ggplot(weeklydf, aes(x=names, width=.2)) +
415
+ geom_point(aes(y=bonus_coefs), colour=maroon, stat="identity") +
416
+ geom_errorbar(aes(ymin=bonus_upper, ymax=bonus_lower), colour=maroon, stat="identity") +
417
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
418
+ theme_classic() +
419
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
420
+ labs(x = "Week of experiment") +
421
+ theme(legend.text.align = 0,
422
+ legend.key.height = unit(1, "cm"),
423
+ legend.position="bottom") +
424
+ theme(legend.margin=margin(0,0,0,0),
425
+ legend.box.margin=margin(-10,-10,-10,-10)) +
426
+ theme(axis.text.x = element_text(colour="black")) +
427
+ coord_cartesian(ylim = c(-70, 5)) +
428
+ theme(legend.text=element_text(size=11)) +
429
+ theme( # remove the vertical grid lines
430
+ panel.grid.major.x = element_blank() ,
431
+ # explicitly set the horizontal lines (or they will disappear too)
432
+ panel.grid.major.y = element_line( size=.05, color="grey" )
433
+ )
434
+
435
+ l <- ggplot(weeklydf, aes(x=names, width=.2)) +
436
+ geom_point(aes(y=limit_coefs), colour=grey, stat="identity") +
437
+ geom_errorbar(aes(ymin=limit_upper, ymax=limit_lower), colour=grey, stat="identity") +
438
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
439
+ theme_classic() +
440
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
441
+ labs(x = "Week of experiment") +
442
+ theme(legend.text.align = 0,
443
+ legend.key.height = unit(1, "cm"),
444
+ legend.position="bottom") +
445
+ theme(legend.margin=margin(0,0,0,0),
446
+ legend.box.margin=margin(-10,-10,-10,-10)) +
447
+ theme(axis.text.x = element_text(colour="black")) +
448
+ coord_cartesian(ylim = c(-70, 5)) +
449
+ theme(legend.text=element_text(size=11)) +
450
+ theme( # remove the vertical grid lines
451
+ panel.grid.major.x = element_blank() ,
452
+ # explicitly set the horizontal lines (or they will disappear too)
453
+ panel.grid.major.y = element_line( size=.05, color="grey" )
454
+ )
455
+
456
+
457
+ ggsave(sprintf('output/%s.pdf', filename1), plot=b, width=6.5, height=4.5, units="in")
458
+ ggsave(sprintf('output/%s.pdf', filename2), plot=l, width=6.5, height=4.5, units="in")
459
+ }
460
+
461
+
462
+
463
+
464
+
465
+
466
+ plot_treatment_effects <- function(df, filename1, filename2, filename3){
467
+ period_usage <- c("PD_P2_UsageFITSBY", "PD_P3_UsageFITSBY", "PD_P4_UsageFITSBY", "PD_P5_UsageFITSBY")
468
+
469
+ bonus_coefs <- c()
470
+ limit_coefs <- c()
471
+ bonus_lower <- c()
472
+ bonus_upper <- c()
473
+ limit_upper<- c()
474
+ limit_lower<- c()
475
+
476
+ for (period in period_usage){
477
+ dep_var <- period
478
+ eq <- paste0(dep_var, '~ PD_P1_UsageFITSBY + L + B + S')
479
+
480
+ fit <- lm(data = df,
481
+ formula = eq,
482
+ weights = w)
483
+
484
+ bonus_coefs <- c(bonus_coefs, summary(fit)$coefficients[4,1])
485
+ bonus_lower <- c(bonus_lower, summary(fit, cluster= c("UserID"))$coefficients[4,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
486
+ bonus_upper <- c(bonus_upper, summary(fit, cluster= c("UserID"))$coefficients[4,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
487
+
488
+ limit_coefs <- c(limit_coefs, summary(fit)$coefficients[3,1])
489
+ limit_lower <- c(limit_lower, summary(fit, cluster= c("UserID"))$coefficients[3,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
490
+ limit_upper <- c(limit_upper, summary(fit, cluster= c("UserID"))$coefficients[3,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
491
+
492
+ }
493
+
494
+ x <- c('Period 2', 'Period 3', 'Period 4', 'Period 5')
495
+ names <- factor(x, levels=x)
496
+ periodtreatments <- data.frame(names, bonus_coefs, limit_coefs, bonus_lower,
497
+ bonus_upper, limit_lower, limit_upper )
498
+
499
+
500
+
501
+ cols <- c("Bonus"=maroon ,
502
+ "Limit"=grey)
503
+
504
+ cols_shape <- c("Bonus"=16 ,
505
+ "Limit"=15)
506
+
507
+ a <- ggplot(periodtreatments, aes(x=names, width=.2)) +
508
+ geom_point(aes(y=bonus_coefs, colour="Bonus"), stat="identity", position = position_nudge(x = -.1)) +
509
+ geom_point(aes(y=limit_coefs, colour="Limit"), stat="identity", position = position_nudge(x = .1))+
510
+ geom_errorbar(aes(ymin=bonus_upper, ymax=bonus_lower, width=0.05), stat="identity", colour=maroon, position = position_nudge(x = -.1)) +
511
+ geom_errorbar(aes(ymin=limit_lower, ymax=limit_upper, width=0.05), stat="identity", colour=grey, position=position_nudge(x = .1)) +
512
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
513
+ theme_classic() +
514
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
515
+ labs(x = "") +
516
+ theme(legend.text.align = 0,
517
+ legend.key.height = unit(1, "cm"),
518
+ legend.position="bottom") +
519
+ theme(legend.margin=margin(0,0,0,0),
520
+ legend.box.margin=margin(-10,-10,-10,-10)) +
521
+ theme(axis.text.x = element_text(colour="black")) +
522
+ coord_cartesian(ylim = c(-70, 5)) +
523
+ theme(legend.text=element_text(size=11)) +
524
+ theme( # remove the vertical grid lines
525
+ panel.grid.major.x = element_blank() ,
526
+ # explicitly set the horizontal lines (or they will disappear too)
527
+ panel.grid.major.y = element_line( size=.05, color="grey" )
528
+ )+
529
+ scale_colour_manual(name = "", values=cols,
530
+ labels = c("Bonus", "Limit")) +
531
+ guides(colour=guide_legend(title.position="top",
532
+ title.hjust =0.5))
533
+
534
+
535
+ b <- ggplot(periodtreatments, aes(x=names, width=.2)) +
536
+ geom_point(aes(y=bonus_coefs), colour=maroon, stat="identity") +
537
+ geom_errorbar(aes(ymin=bonus_upper, ymax=bonus_lower, width=0.05), colour=maroon, stat="identity") +
538
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
539
+ theme_classic() +
540
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
541
+ labs(x = "") +
542
+ theme(legend.text.align = 0,
543
+ legend.key.height = unit(1, "cm"),
544
+ legend.position="bottom") +
545
+ theme(legend.margin=margin(0,0,0,0),
546
+ legend.box.margin=margin(-10,-10,-10,-10)) +
547
+ theme(axis.text.x = element_text(colour="black")) +
548
+ coord_cartesian(ylim = c(-70, 5)) +
549
+ theme(legend.text=element_text(size=11)) +
550
+ theme( # remove the vertical grid lines
551
+ panel.grid.major.x = element_blank() ,
552
+ # explicitly set the horizontal lines (or they will disappear too)
553
+ panel.grid.major.y = element_line( size=.05, color="grey" )
554
+ )
555
+
556
+
557
+
558
+ l <- ggplot(periodtreatments, aes(x=names, width=.2)) +
559
+ geom_point(aes(y=limit_coefs), colour=grey, stat="identity") +
560
+ geom_errorbar(aes(ymin=limit_upper, ymax=limit_lower, width=0.05), colour=grey, stat="identity") +
561
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
562
+ theme_classic() +
563
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
564
+ labs(x = "") +
565
+ theme(legend.text.align = 0,
566
+ legend.key.height = unit(1, "cm"),
567
+ legend.position="bottom") +
568
+ theme(legend.margin=margin(0,0,0,0),
569
+ legend.box.margin=margin(-10,-10,-10,-10)) +
570
+ theme(axis.text.x = element_text(colour="black")) +
571
+ coord_cartesian(ylim = c(-70, 5)) +
572
+ theme(legend.text=element_text(size=11)) +
573
+ theme( # remove the vertical grid lines
574
+ panel.grid.major.x = element_blank() ,
575
+ # explicitly set the horizontal lines (or they will disappear too)
576
+ panel.grid.major.y = element_line( size=.05, color="grey" )
577
+ )
578
+
579
+ ggsave(sprintf('output/%s.pdf', filename1), plot=a, width=6.5, height=4.5, units="in")
580
+ ggsave(sprintf('output/%s.pdf', filename2), plot=b, width=6.5, height=4.5, units="in")
581
+ ggsave(sprintf('output/%s.pdf', filename3), plot=l, width=6.5, height=4.5, units="in")
582
+
583
+
584
+ }
585
+
586
+ plot_treatment_effects_interaction <- function(df, filename1){
587
+ period_usage <- c("PD_P2_UsageFITSBY", "PD_P3_UsageFITSBY", "PD_P4_UsageFITSBY", "PD_P5_UsageFITSBY")
588
+
589
+ bonus_coefs <- c()
590
+ limit_coefs <- c()
591
+ bonus_lower <- c()
592
+ bonus_upper <- c()
593
+ limit_upper<- c()
594
+ limit_lower<- c()
595
+ interaction_coefs <- c()
596
+ interaction_lower <- c()
597
+ interaction_upper <- c()
598
+
599
+ for (period in period_usage){
600
+ dep_var <- period
601
+ eq <- paste0(dep_var, '~ PD_P1_UsageFITSBY + L + B + L*B + S')
602
+
603
+ fit <- lm(data = df,
604
+ formula = eq,
605
+ weights = w)
606
+
607
+ bonus_coefs <- c(bonus_coefs, summary(fit)$coefficients[4,1])
608
+ bonus_lower <- c(bonus_lower, summary(fit, cluster= c("UserID"))$coefficients[4,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
609
+ bonus_upper <- c(bonus_upper, summary(fit, cluster= c("UserID"))$coefficients[4,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[4,2])
610
+
611
+ limit_coefs <- c(limit_coefs, summary(fit)$coefficients[3,1])
612
+ limit_lower <- c(limit_lower, summary(fit, cluster= c("UserID"))$coefficients[3,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
613
+ limit_upper <- c(limit_upper, summary(fit, cluster= c("UserID"))$coefficients[3,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
614
+
615
+ interaction_coefs <- c(interaction_coefs, summary(fit)$coefficients[12,1])
616
+ interaction_lower <- c(interaction_lower, summary(fit, cluster= c("UserID"))$coefficients[12,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[12,2])
617
+ interaction_upper <- c(interaction_upper, summary(fit, cluster= c("UserID"))$coefficients[12,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[12,2])
618
+
619
+ }
620
+
621
+
622
+
623
+ x <- c('Period 2', 'Period 3', 'Period 4', 'Period 5')
624
+ names <- factor(x, levels=x)
625
+
626
+ periodtreatments <- data.frame(names, bonus_coefs, bonus_lower, bonus_upper, limit_coefs, limit_lower, limit_upper,interaction_coefs, interaction_lower, interaction_upper)
627
+
628
+ periodtreatments$bonus <- "Bonus"
629
+ periodtreatments$limit <- "Limit"
630
+ periodtreatments$BL <- "Limit x Bonus"
631
+
632
+
633
+ maroon <- '#94343c'
634
+ grey <- '#848484'
635
+ skyblue <- '#87CEEB'
636
+ black <- '#000000'
637
+ deepskyblue <- '#B0C4DE'
638
+
639
+ cols <- c("Bonus"=maroon ,
640
+ "Limit"=grey,
641
+ "Limit x Bonus"= deepskyblue)
642
+
643
+ cols_shape <- c("Bonus"=15 ,
644
+ "Limit"=19,
645
+ "Limit x Bonus"= 17)
646
+
647
+ a <- ggplot(periodtreatments, aes(x=names, width=.2)) +
648
+ geom_point(aes(y=bonus_coefs, colour=bonus, shape =bonus), stat="identity", position = position_nudge(x = -.2)) +
649
+ geom_point(aes(y=limit_coefs, colour=limit, shape=limit), stat="identity", position = position_nudge(x = 0))+
650
+ geom_point(aes(y=interaction_coefs, colour=BL, shape=BL), stat="identity", position = position_nudge(x = 0.2)) +
651
+ geom_errorbar(aes(ymin=bonus_upper, ymax=bonus_lower, width=0.05), stat="identity", colour=maroon, position = position_nudge(x = -.2)) +
652
+ geom_errorbar(aes(ymin=limit_lower, ymax=limit_upper, width=0.05), stat="identity", colour=grey, position=position_nudge(x =0)) +
653
+ geom_errorbar(aes(ymin=interaction_lower, ymax=interaction_upper, width=0.05), stat="identity", colour=deepskyblue, position=position_nudge(x =0.2)) +
654
+ scale_y_continuous(name="Treatment effect (minutes/day)") +
655
+ theme_classic() +
656
+ #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
657
+ labs(x = "") +
658
+ theme(legend.text.align = 0,
659
+ legend.key.height = unit(1, "cm"),
660
+ legend.position="bottom") +
661
+ theme(legend.margin=margin(0,0,0,0),
662
+ legend.box.margin=margin(-10,-10,-10,-10)) +
663
+ theme(axis.text.x = element_text(colour="black")) +
664
+ coord_cartesian(ylim = c(-80, 20)) +
665
+ theme(legend.text=element_text(size=11)) +
666
+ theme( # remove the vertical grid lines
667
+ panel.grid.major.x = element_blank() ,
668
+ # explicitly set the horizontal lines (or they will disappear too)
669
+ panel.grid.major.y = element_line( size=.05, color="grey" )
670
+ )+
671
+ scale_colour_manual(name = "",
672
+ values=cols) +
673
+ scale_shape_manual(name = "",
674
+ values = cols_shape)
675
+
676
+ ggsave(sprintf('output/%s.pdf', filename1), plot=a, width=6.5, height=4.5, units="in")
677
+
678
+
679
+ }
680
+
681
+ get_opt <- function(df) {
682
+ # Count Limit-group users who opted out of the app and compute their share
683
+
684
+ analysisUser <- read_dta("input/AnalysisUser.dta")
685
+
686
+ limit <- analysisUser %>%
687
+ filter(AppCode %in% df$UserID) %>%
688
+ select(OptedOut) %>%
689
+ filter(!is.na(OptedOut))
690
+
691
+
692
+ estimate <-
693
+ list(nrow(limit %>% filter(OptedOut==1)) ,
694
+ signif(nrow(limit %>% filter(OptedOut==1))/ nrow(df %>% filter(L==1))*100, digits=1))
695
+
696
+ names(estimate) <- c('numberpeopleoptedout', 'percentoptedout')
697
+
698
+ save_nrow(estimate, filename ="optingout", suffix="")
699
+ }
700
+
701
+
702
+ get_addiction_treatment_effect <- function(df, filename){
703
+ survey_outcomes <- c("index_well_N", "SWBIndex_N", "LifeBetter_N", "SMSIndex_N", "AddictionIndex_N", "PhoneUseChange_N")
704
+
705
+ bonus_coefs <- c()
706
+ limit_coefs <- c()
707
+ bonus_lower <- c()
708
+ bonus_upper <- c()
709
+ limit_upper<- c()
710
+ limit_lower<- c()
711
+
712
+
713
+ df <- df %>%
714
+ mutate(S43_PhoneUseChange_N = (S4_PhoneUseChange_N + S3_PhoneUseChange_N)/2,
715
+ S43_AddictionIndex_N = (S4_AddictionIndex_N + S3_AddictionIndex_N)/2,
716
+ S43_SMSIndex_N = (S4_SMSIndex_N + S3_SMSIndex_N)/2,
717
+ S43_LifeBetter_N = (S4_LifeBetter_N + S3_LifeBetter_N)/2,
718
+ S43_SWBIndex_N = (S4_SWBIndex_N + S3_SWBIndex_N)/2,
719
+ S43_index_well_N = (S4_index_well_N + S3_index_well_N)/2)
720
+
721
+
722
+ for (outcome in survey_outcomes){
723
+ dep_var <- sprintf("S4_%s", outcome)
724
+ indep_var <- sprintf("S1_%s", outcome)
725
+ eq <- paste0(paste0(dep_var, '~ L + B + S + '), indep_var)
726
+
727
+ fit <- lm(data = df,
728
+ formula = eq,
729
+ weights = w)
730
+
731
+ bonus_coefs <- c(bonus_coefs, summary(fit)$coefficients[3,1])
732
+ bonus_lower <- c(bonus_lower, summary(fit, cluster= c("UserID"))$coefficients[3,1] -1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
733
+ bonus_upper <- c(bonus_upper, summary(fit, cluster= c("UserID"))$coefficients[3,1] +1.96*summary(fit, cluster= c("UserID"))$coefficients[3,2])
734
+
735
+
736
+ dep_var_limit <- sprintf("S43_%s", outcome)
737
+ indep_var <- sprintf("S1_%s", outcome)
738
+ eq_limit <- paste0(paste0(dep_var_limit, '~ L + B + S + '), indep_var)
739
+
740
+ fit_limit <- lm(data = df,
741
+ formula = eq_limit,
742
+ weights = w)
743
+
744
+ limit_coefs <- c(limit_coefs, summary(fit_limit)$coefficients[3,1])
745
+ limit_lower <- c(limit_lower, summary(fit_limit, cluster= c("UserID"))$coefficients[3,1] -1.96*summary(fit_limit, cluster= c("UserID"))$coefficients[3,2])
746
+ limit_upper <- c(limit_upper, summary(fit_limit, cluster= c("UserID"))$coefficients[3,1] +1.96*summary(fit_limit, cluster= c("UserID"))$coefficients[3,2])
747
+
748
+ }
749
+
750
+ weeklydataframe <- as.data.frame(cbind(bonus_coefs, limit_coefs, bonus_lower,
751
+ bonus_upper, limit_lower, limit_upper ))
752
+
753
+ names(weeklydataframe) <- c("bonus_coefs", "limit_coefs", "bonus_lower",
754
+ "bonus_upper", "limit_lower", "limit_upper")
755
+
756
+
757
+
758
+ x <- c('Survey index', 'Subjective well-being', 'Phone makes life better', 'SMS addiction scale x (-1)', 'Addiction scale x (-1)', 'Ideal use change')
759
+ names <- factor(x, levels=x)
760
+
761
+ weeklydf <- data.frame(names, weeklydataframe)
762
+
763
+
764
+ cols <- c("Bonus"=maroon,
765
+ "Limit"=grey)
766
+
767
+ a <- ggplot(weeklydf, aes(x=names, width=.2)) +
768
+ geom_point(aes(y=bonus_coefs, colour="Bonus"), stat="identity", position = position_nudge(x = -.1)) +
769
+ geom_point(aes(y=limit_coefs, colour="Limit"), stat="identity", position = position_nudge(x = .1))+
770
+ geom_errorbar(aes(ymin=bonus_upper, ymax=bonus_lower, width=0.05), stat="identity", colour=maroon, position = position_nudge(x = -.1)) +
771
+ geom_errorbar(aes(ymin=limit_lower, ymax=limit_upper, width=0.05), stat="identity", colour=grey, position=position_nudge(x = .1)) +
772
+ scale_y_continuous(name="Treatment effect (standard deviation)") +
773
+ theme_classic() +
774
+ scale_colour_manual(name = "", values=cols,
775
+ labels = c("Bonus", "Limit")) +
776
+ labs(x = "") +
777
+ geom_hline(yintercept=0) +
778
+ coord_flip(ylim = c(-0.2,0.6)) +
779
+ theme(legend.text=element_text(size=11)) +
780
+ theme( # remove the vertical grid lines
781
+ panel.grid.major.x = element_blank() ,
782
+ # explicitly set the horizontal lines (or they will disappear too)
783
+ panel.grid.major.y = element_line( size=.09, color="grey" )
784
+ ) +
785
+ theme(legend.position="bottom")
786
+
787
+ ggsave(sprintf('output/%s.pdf', filename), plot=a, width=6.5, height=4.5, units="in")
788
+ }
789
+
790
+
791
+ get_addiction_scalar <- function(df){
792
+ addiction <- df %>%
793
+ select(contains("Addiction"))
794
+
795
+ df_addiction <- df
796
+ for (i in 1:16){
797
+ df_addiction <- df_addiction %>%
798
+ mutate(!!as.name(paste0("High_S1_Addiction_", i)) := ifelse(!!as.name(paste0("S1_Addiction_",i))>0.5, 1, 0)) %>%
799
+ mutate(!!as.name(paste0("High_S3_Addiction_", i)) := ifelse(!!as.name(paste0("S3_Addiction_",i))>0.5, 1, 0))
800
+ }
801
+
802
+ df_means <- df_addiction
803
+ for (i in 1:16){
804
+ df_means <- df_means %>%
805
+ mutate(!!as.name(paste0("Mean_High_S1_Addiction_", i)) := mean(!!as.name(paste0("High_S1_Addiction_",i)), na.rm = T)) %>%
806
+ mutate(!!as.name(paste0("Mean_High_S3_Addiction_", i)) := mean(!!as.name(paste0("High_S3_Addiction_",i)), na.rm = T))
807
+ }
808
+
809
+ df_S3_addiction <- df_means %>%
810
+ select(contains("Mean_High_S3")) %>%
811
+ unique()
812
+
813
+
814
+ df_S3_addiction$top_seven <- rowMeans(df_S3_addiction[1:7], na.rm=TRUE)
815
+ df_S3_addiction$bottom_nine <- rowMeans(df_S3_addiction[8:16], na.rm=TRUE)
816
+
817
+ mean_top_seven <- (df_S3_addiction$top_seven)*100
818
+ mean_bottom_nine <- (df_S3_addiction$bottom_nine)*100
819
+
820
+ df_addiction_high <- df_addiction %>%
821
+ select(contains("High_S3_Addiction_"))
822
+
823
+ df_addiction_high$top_seven_any <- rowSums(df_addiction_high[1:7], na.rm=TRUE)
824
+ df_addiction_high$bottom_nine_any <- rowSums(df_addiction_high[8:16], na.rm=TRUE)
825
+
826
+ df_addiction_high <-df_addiction_high %>%
827
+ mutate(top_seven_any_indicator = ifelse(top_seven_any>0, 1, 0),
828
+ bottom_nine_any_indicator = ifelse(bottom_nine_any>0, 1, 0))
829
+
830
+ mean_top_seven_any <- mean(df_addiction_high$top_seven_any_indicator, na.rm=TRUE)*100
831
+ mean_bottom_nine_any <- mean(df_addiction_high$bottom_nine_any_indicator, na.rm=TRUE)*100
832
+
833
+ mean_top_seven <- signif(mean_top_seven, digits=2)
834
+ mean_top_seven_any <- signif(mean_top_seven_any, digits=2)
835
+ mean_bottom_nine <- signif(mean_bottom_nine, digits=2)
836
+ mean_bottom_nine_any <- signif(mean_bottom_nine_any, digits=2)
837
+
838
+ limit_tightness_df <- df %>%
839
+ filter(L==1) %>%
840
+ select(contains("PD_P5432_LimitTight"))
841
+
842
+ limit_tightness_df_nomissing <- df %>%
843
+ filter(L==1) %>%
844
+ filter(PD_P5432_LimitTight>0)
845
+
846
+ limit_tightness_df_pfive <- df %>%
847
+ filter(L==1) %>%
848
+ select(contains("PD_P5_LimitTight"))
849
+
850
+ limit_tightness_df_nomissing_pfive <- df %>%
851
+ filter(L==1) %>%
852
+ filter(PD_P5_LimitTight>0)
853
+
854
+ percent_positive_tightness <- nrow(limit_tightness_df_nomissing) / nrow(limit_tightness_df)
855
+ percent_positive_tightness_pfive <- nrow(limit_tightness_df_nomissing_pfive) / nrow(limit_tightness_df_pfive)
856
+
857
+ average_limit_tightness <- mean(limit_tightness_df$PD_P5432_LimitTight, na.rm=TRUE)
858
+ average_limit_tightness_pfive <- mean(limit_tightness_df_pfive$PD_P5_LimitTight, na.rm=TRUE)
859
+
860
+ percentpositivetightness <- signif(percent_positive_tightness, digits=2)*100
861
+ percentpositivetightnesspfive <- signif(percent_positive_tightness_pfive, digits=2)*100
862
+
863
+ averagelimittightness <- signif(average_limit_tightness, digits=2)
864
+ averagelimittightnesspfive <- signif(average_limit_tightness_pfive, digits=2)
865
+
866
+
867
+ limit_df <- df %>%
868
+ filter(L==1) %>%
869
+ select(PD_P5432_LimitTight_Facebook, PD_P5432_LimitTight_Browser, PD_P5432_LimitTight_YouTube, PD_P5432_LimitTight_Instagram)
870
+
871
+
872
+ limit_df[is.na(limit_df)] <- 0
873
+
874
+ mean_fb <- mean(limit_df$PD_P5432_LimitTight_Facebook)
875
+ mean_browser <- mean(limit_df$PD_P5432_LimitTight_Browser)
876
+ mean_youtube <- mean(limit_df$PD_P5432_LimitTight_YouTube)
877
+ mean_insta <- mean(limit_df$PD_P5432_LimitTight_Instagram)
878
+
879
+ mean_insta_nice <- signif(mean_insta, digits=1)
880
+ mean_youtube_nice <- signif(mean_youtube, digits=1)
881
+ mean_browser_nice <- signif(mean_browser, digits=1)
882
+ mean_fb_nice <- signif(mean_fb, digits=1)
883
+
884
+ mpl_df <- df %>%
885
+ filter(B==1) %>%
886
+ select(S2_PredictUseInitial, S2_PredictUseBonus)
887
+
888
+ mean_initial_use <- mean(mpl_df$S2_PredictUseInitial, na.rm=TRUE)/60
889
+ mean_use_bonus <- mean(mpl_df$S2_PredictUseBonus, na.rm=TRUE)/60
890
+
891
+
892
+ df <- df %>%
893
+ mutate(F_B_uncensored = 50*PD_P1_UsageFITSBY/20) %>%
894
+ mutate(F_B_min = ifelse(F_B_uncensored<150, F_B_uncensored, 150)) %>%
895
+ mutate(F_B = F_B_min/num_days)
896
+
897
+ FB <- mean(df$F_B, na.rm = T)
898
+
899
+ num_days <- 20
900
+ hourly_rate <- 50
901
+ max_hours <- 3
902
+ p_B <- (hourly_rate / num_days)
903
+ abcd <- p_B*0.5*(mean_use_bonus+mean_initial_use)
904
+ MPL <- FB - abcd
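+ # Expected bonus earnings: the per-day budget F_B minus p_B (here $2.50 = $50 over
+ # 20 days, per hour/day of use) times the midpoint of predicted use with and without the bonus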
905
+
906
+ MPLearningsmean <- MPL*20
907
+
908
+ MPLvalued <- signif(mean(df$S2_MPL, na.rm=T), digits=2)
909
+ MPLearningsnice <- signif(MPLearningsmean, digits=2)
910
+ MPLpremiumnice <- MPLvalued - MPLearningsnice
911
+
912
+ p <-paste0(p_B, "0")
913
+ meanpredictuse <- signif(mean_initial_use, digits=2)
914
+ meanpredictbonus <- signif(mean_use_bonus, digits=2)
915
+ abcd <- signif(abcd, digits=3)
916
+ MPL <- signif(MPL, digits=3)
917
+ vB <- MPLvalued/20
918
+
919
+ behaviourpremium <- vB - MPL
920
+
921
+ fit_3 <- lm(data=df, PD_P5432_Usage_Other ~ PD_P1_Usage_Other + L + B + S)
922
+ limitotherfitsby <- fit_3$coefficients[['L']]
923
+ limitotherfitsbynice <- signif(limitotherfitsby, digits=2)
924
+
925
+
926
+ estimate <-
927
+ list(mean_top_seven, mean_top_seven_any, mean_bottom_nine, mean_bottom_nine_any,
928
+ percentpositivetightness, averagelimittightness, percentpositivetightnesspfive, averagelimittightnesspfive, mean_insta_nice, mean_youtube_nice, mean_browser_nice,
929
+ mean_fb_nice, MPLvalued, MPLearningsnice, MPLpremiumnice,
930
+ p,meanpredictuse,meanpredictbonus, abcd, MPL, behaviourpremium, limitotherfitsbynice)
931
+ names(estimate) <- c('meantopsevenaddiction', 'meantopsevenanyaddiction', 'meanbottomnineaddiction',
932
+ 'meanbottomnineanyaddiction', 'percentpositivetightness', 'averagelimittightness', 'percentpositivetightnesspfive', 'averagelimittightnesspfive', 'instalimittight', 'youtubelimittight',
933
+ 'browserlimittight', 'fblimittight',
934
+ 'MPLvalued', 'MPLearningsnice', 'MPLpremiumnice','p','meanpredictuse','meanpredictbonus', 'abcd', 'MPL', 'behaviourpremium', 'limitotherfitsbynice')
935
+
936
+ save_nrow(estimate, filename ="addiction_scalars", suffix="")
937
+
938
+
939
+
940
+ }
941
+
942
+ get_swb_effect_exported_limit <- function(df){
943
+ df <- df %>%
944
+ mutate( S43_PhoneUseChange_N = (S4_PhoneUseChange_N + S3_PhoneUseChange_N)/2,
945
+ S43_AddictionIndex_N = (S4_AddictionIndex_N + S3_AddictionIndex_N)/2,
946
+ S43_SMSIndex_N = (S4_SMSIndex_N + S3_SMSIndex_N)/2,
947
+ S43_LifeBetter_N = (S4_LifeBetter_N + S3_LifeBetter_N)/2,
948
+ S43_SWBIndex_N = (S4_SWBIndex_N + S3_SWBIndex_N)/2,
949
+ S43_index_well_N = (S4_index_well_N + S3_index_well_N)/2 ,
950
+ S43_PhoneUseChange = (S4_PhoneUseChange + S3_PhoneUseChange)/2,
951
+ S43_AddictionIndex = (S4_AddictionIndex + S3_AddictionIndex)/2,
952
+ S43_SMSIndex = (S4_SMSIndex + S3_SMSIndex)/2,
953
+ S43_LifeBetter = (S4_LifeBetter + S3_LifeBetter)/2,
954
+ S43_SWBIndex = (S4_SWBIndex + S3_SWBIndex)/2,
955
+ S43_index_well= (S4_index_well + S3_index_well)/2)
956
+
957
+
958
+ fit<- lm_robust(data=df, formula = S43_PhoneUseChange_N ~ S1_PhoneUseChange_N + B+ L+ S, cluster=UserID )
959
+
960
+ estimate <- list (fit$coefficients[['L']])
961
+ names(estimate) <- c('limitidealcoefn')
962
+ se <- list (summary(fit)$coefficients[4,2])
963
+ names(se) <- c('limitidealsen')
964
+ pval <- list (summary(fit)$coefficients[4,4])
965
+ names(pval) <- c('pvallimitideal')
966
+ p_value_list <- fit[5]
967
+ p_value <- p_value_list[['p.value']]
968
+
969
+ p_adj <- p.adjust(p_value, method = "BH")
970
+ pvallimit <- list(p_adj[4])
971
+ names(pvallimit) <- 'qadjlimitphonechange'
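+ # Benjamini-Hochberg adjustment across this fit's coefficient p-values; index 4 is the
+ # L coefficient (after the intercept, baseline control, and B)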
972
+
973
+
974
+ fit1 <- lm_robust(data=df, formula = S43_PhoneUseChange ~ S1_PhoneUseChange + B+ L+ S, cluster=UserID )
975
+ estimate1 <- list (fit1$coefficients[['L']])
976
+ names(estimate1) <- c('limitidealcoef')
977
+ se1 <- list (summary(fit1)$coefficients[4,2])
978
+ names(se1) <- c('limitidealse')
979
+
980
+ fit2 <- lm_robust(data=df, formula = S43_AddictionIndex_N ~ S1_AddictionIndex_N + B+ L+ S, cluster=UserID )
981
+ estimate2 <- list (fit2$coefficients[['L']])
982
+ names(estimate2) <- c('limitaddictioncoefn')
983
+ se2 <- list (summary(fit2)$coefficients[4,2])
984
+ names(se2) <- c('limitaddictionsen')
985
+ pval2 <- list (summary(fit2)$coefficients[4,4])
986
+ names(pval2) <- c('pvallimitaddict')
987
+ p_value_list <- fit2[5]
988
+ p_value <- p_value_list[['p.value']]
989
+
990
+ p_adj <- p.adjust(p_value, method = "BH")
991
+ pvallimit2 <- list(p_adj[4])
992
+ names(pvallimit2) <- 'qadjlimitaddictionindex'
993
+
994
+ fit3 <- lm_robust(data=df, formula = S43_AddictionIndex ~ S1_AddictionIndex + B+ L+ S, cluster=UserID )
995
+ estimate3 <- list (fit3$coefficients[['L']])
996
+ names(estimate3) <- c('limitaddictioncoef')
997
+ se3 <- list (summary(fit3)$coefficients[4,2])
998
+ names(se3) <- c('limitaddictionse')
999
+
1000
+ fit4 <- lm_robust(data=df, formula = S43_SMSIndex_N ~ S1_SMSIndex_N + B+ L+ S, cluster=UserID )
1001
+ estimate4 <- list (fit4$coefficients[['L']])
1002
+ names(estimate4) <- c('limitsmscoefn')
1003
+ se4 <- list (summary(fit4)$coefficients[4,2])
1004
+ names(se4) <- c('limitsmssen')
1005
+ pval4 <- list (summary(fit4)$coefficients[4,4])
1006
+ names(pval4) <- c('pvallimitsmsindex')
1007
+ p_value_list <- fit4[5]
1008
+ p_value <- p_value_list[['p.value']]
1009
+
1010
+ p_adj <- p.adjust(p_value, method = "BH")
1011
+ pvallimit3 <- list(p_adj[4])
1012
+ names(pvallimit3) <- 'qadjlimitsmsindex'
1013
+
1014
+
1015
+ fit5 <- lm_robust(data=df, formula = S43_SMSIndex ~ S1_SMSIndex + B+ L+ S, cluster=UserID )
1016
+ estimate5 <- list (fit5$coefficients[['L']])
1017
+ names(estimate5) <- c('limitsmscoef')
1018
+ se5 <- list (summary(fit5)$coefficients[4,2])
1019
+ names(se5) <- c('limitsmsse')
1020
+
1021
+
1022
+ fit6 <- lm_robust(data=df, formula = S43_LifeBetter_N ~ S1_LifeBetter_N + B+ L+ S, cluster=UserID )
1023
+ estimate6 <- list (fit6$coefficients[['L']])
1024
+ names(estimate6) <- c('limitlifebettercoefn')
1025
+ se6 <- list (summary(fit6)$coefficients[4,2])
1026
+ names(se6) <- c('limitlifebettersen')
1027
+ pval6 <- list (summary(fit6)$coefficients[4,4])
1028
+ names(pval6) <- c('pvallimitlifebetter')
1029
+ p_value_list <- fit6[5]
1030
+ p_value <- p_value_list[['p.value']]
1031
+
1032
+ p_adj <- p.adjust(p_value, method = "BH")
1033
+
1034
+ pvallimit4 <- list(p_adj[4])
1035
+ names(pvallimit4) <- 'qadjlimitlifebetter'
1036
+
1037
+
1038
+
1039
+ fit7 <- lm_robust(data=df, formula = S43_LifeBetter ~ S1_LifeBetter + B + L + S, cluster = UserID)
1040
+ estimate7 <- list (fit7$coefficients[['L']])
1041
+ names(estimate7) <- c('limitlifebettercoef')
1042
+ se7 <- list (summary(fit7)$coefficients[4,2])
1043
+ names(se7) <- c('limitlifebetterse')
1044
+
1045
+
1046
+
1047
+
1048
+ fit8 <- lm_robust(data=df, formula = S43_SWBIndex_N ~ S1_SWBIndex_N + B+ L+ S, cluster=UserID )
1049
+ estimate8 <- list (fit8$coefficients[['L']])
1050
+ names(estimate8) <- c('limitswbindexcoefn')
1051
+ se8 <- list (summary(fit8)$coefficients[4,2])
1052
+ names(se8) <- c('limitswbindexsen')
1053
+ pval8 <- list (summary(fit8)$coefficients[4,4])
1054
+ names(pval8) <- c('pvallimitswbindex')
1055
+ p_value_list <- fit8[5]
1056
+ p_value <- p_value_list[['p.value']]
1057
+
1058
+ p_adj <- p.adjust(p_value, method = "BH")
1059
+ pvallimit5 <- list(p_adj[4])
1060
+ names(pvallimit5) <- 'qadjlimitswbindex'
1061
+
1062
+
1063
+ fit9 <- lm_robust(data=df, formula = S43_SWBIndex ~ S1_SWBIndex + B+ L+ S, cluster=UserID )
1064
+ estimate9 <- list (fit9$coefficients[['L']])
1065
+ names(estimate9) <- c('limitswbindexcoef')
1066
+ se9 <- list (summary(fit9)$coefficients[4,2])
1067
+ names(se9) <- c('limitswbindexse')
1068
+
1069
+
1070
+ fit10 <- lm_robust(data=df, formula = S43_index_well_N ~ S1_index_well_N + B+ L+ S, cluster=UserID )
1071
+ estimate10 <- list (fit10$coefficients[['L']])
1072
+ names(estimate10) <- c('limitindexwellcoefn')
1073
+ se10 <- list (summary(fit10)$coefficients[4,2])
1074
+ names(se10) <- c('limitindexwellsen')
1075
+ pval10 <- list (summary(fit10)$coefficients[4,4])
1076
+ names(pval10) <- c('pvallimitindexwell')
1077
+ p_value_list <- fit10[5]
1078
+ p_value <- p_value_list[['p.value']]
1079
+
1080
+ p_adj <- p.adjust(p_value, method = "BH")
1081
+ pvallimit6 <- list(p_adj[4])
1082
+ names(pvallimit6) <- 'qadjlimitindexwell'
1083
+
1084
+ fit11 <- lm_robust(data=df, formula = S43_index_well ~ S1_index_well + B+ L+ S, cluster=UserID )
1085
+ estimate11 <- list (fit11$coefficients[['L']])
1086
+ names(estimate11) <- c('limitindexwellcoef')
1087
+ se11 <- list (summary(fit11)$coefficients[4,2])
1088
+ names(se11) <- c('limitindexwellse')
1089
+
1090
+ limit_effect <- list.merge(estimate, estimate1, estimate2, estimate3, estimate4, estimate5, estimate6,
1091
+ estimate7, estimate8, estimate9, estimate10, estimate11, se, se1, se2, se3, se4, se5, se6, se7, se8,
1092
+ se9, se10, se11, pval, pval2, pval4, pval6, pval8, pval10, pvallimit, pvallimit2, pvallimit3, pvallimit4,
1093
+ pvallimit5, pvallimit6)
1094
+
1095
+ return(limit_effect)
1096
+
1097
+ }
1098
+
1099
+ get_swb_effect_exported_bonus <- function(df){
1100
+ fit<- lm_robust(data=df, formula = S4_PhoneUseChange_N ~ S1_PhoneUseChange_N + B+ L+ S, cluster=UserID )
1101
+
1102
+ estimate <- list (fit$coefficients[['B']])
1103
+ names(estimate) <- c('bonusidealcoefn')
1104
+ se <- list (summary(fit)$coefficients[3,2])
1105
+ names(se) <- c('bonusidealsen')
1106
+ pvaluebonusideal <- list (summary(fit)$coefficients[3,4])
1107
+ names(pvaluebonusideal) <- c('pvaluebonusideal')
1108
+ p_value_list <- fit[5]
1109
+ p_value <- p_value_list[['p.value']]
1110
+
1111
+ p_adj <- p.adjust(p_value, method = "BH")
1112
+ pvalbonus <- list(p_adj[3])
1113
+ names(pvalbonus) <- 'qadjbonusphonechange'
1114
+
1115
+ fit1 <- lm_robust(data=df, formula = S4_PhoneUseChange ~ S1_PhoneUseChange + B+ L+ S, cluster=UserID )
1116
+ estimate1 <- list (fit1$coefficients[['B']])
1117
+ names(estimate1) <- c('bonusidealcoef')
1118
+ se1 <- list (summary(fit1)$coefficients[3,2])
1119
+ names(se1) <- c('bonusidealse')
1120
+
1121
+ fit2 <- lm_robust(data=df, formula = S4_AddictionIndex_N ~ S1_AddictionIndex_N + B+ L+ S, cluster=UserID )
1122
+ estimate2 <- list (fit2$coefficients[['B']])
1123
+ names(estimate2) <- c('bonusaddictioncoefn')
1124
+ se2 <- list (summary(fit2)$coefficients[3,2])
1125
+ names(se2) <- c('bonusaddictionsen')
1126
+ pvaluebonusaddiction <- list (summary(fit2)$coefficients[3,4])
1127
+ names(pvaluebonusaddiction) <- c('pvaluebonusaddiction')
1128
+ p_value_list <- fit2[5]
1129
+ p_value <- p_value_list[['p.value']]
1130
+
1131
+ p_adj <- p.adjust(p_value, method = "BH")
1132
+ pvalbonus2 <- list(p_adj[3])
1133
+ names(pvalbonus2) <- 'qadjbonusaddictionindex'
1134
+
1135
+ fit3 <- lm_robust(data=df, formula = S4_AddictionIndex ~ S1_AddictionIndex + B+ L+ S, cluster=UserID )
1136
+ estimate3 <- list (fit3$coefficients[['B']])
1137
+ names(estimate3) <- c('bonusaddictioncoef')
1138
+ se3 <- list (summary(fit3)$coefficients[3,2])
1139
+ names(se3) <- c('bonusaddictionse')
1140
+
1141
+
1142
+ fit4 <- lm_robust(data=df, formula = S4_SMSIndex_N ~ S1_SMSIndex_N + B+ L+ S, cluster=UserID )
1143
+ estimate4 <- list (fit4$coefficients[['B']])
1144
+ names(estimate4) <- c('bonussmscoefn')
1145
+ se4 <- list (summary(fit4)$coefficients[3,2])
1146
+ names(se4) <- c('bonussmssen')
1147
+ pvaluebonussms <- list (summary(fit4)$coefficients[3,4])
1148
+ names(pvaluebonussms) <- c('pvaluebonussms')
1149
+ p_value_list <- fit4[5]
1150
+ p_value <- p_value_list[['p.value']]
1151
+
1152
+ p_adj <- p.adjust(p_value, method = "BH")
1153
+ pvalbonus3 <- list(p_adj[3])
1154
+ names(pvalbonus3) <- 'qadjbonussmsnindex'
1155
+
1156
+
1157
+ fit5 <- lm_robust(data=df, formula = S4_SMSIndex ~ S1_SMSIndex + B+ L+ S, cluster=UserID )
1158
+ estimate5 <- list (fit5$coefficients[['B']])
1159
+ names(estimate5) <- c('bonussmscoef')
1160
+ se5 <- list (summary(fit5)$coefficients[3,2])
1161
+ names(se5) <- c('bonussmsse')
1162
+
1163
+
1164
+ fit6 <- lm_robust(data=df, formula = S4_LifeBetter_N ~ S1_LifeBetter_N + B+ L+ S, cluster=UserID )
1165
+ estimate6 <- list (fit6$coefficients[['B']])
1166
+ names(estimate6) <- c('bonuslifebettercoefn')
1167
+ se6 <- list (summary(fit6)$coefficients[3,2])
1168
+ names(se6) <- c('bonuslifebettersen')
1169
+ pvaluebonuslifebetter <- list (summary(fit6)$coefficients[3,4])
1170
+ names(pvaluebonuslifebetter) <- c('pvaluebonuslifebetter')
1171
+ p_value_list <- fit6[5]
1172
+ p_value <- p_value_list[['p.value']]
1173
+
1174
+ p_adj <- p.adjust(p_value, method = "BH")
1175
+ pvalbonus4 <- list(p_adj[3])
1176
+ names(pvalbonus4) <- 'qadjbonuslifebetter'
1177
+
1178
+ fit7 <- lm_robust(data=df, formula = S4_LifeBetter ~ S1_LifeBetter + B + L + S, cluster = UserID)
1179
+ estimate7 <- list (fit7$coefficients[['B']])
1180
+ names(estimate7) <- c('bonuslifebettercoef')
1181
+ se7 <- list (summary(fit7)$coefficients[3,2])
1182
+ names(se7) <- c('bonuslifebetterse')
1183
+
1184
+
1185
+
1186
+ fit8 <- lm_robust(data=df, formula = S4_SWBIndex_N ~ S1_SWBIndex_N + B+ L+ S, cluster=UserID )
1187
+ estimate8 <- list (fit8$coefficients[['B']])
1188
+ names(estimate8) <- c('bonusswbindexcoefn')
1189
+ se8 <- list (summary(fit8)$coefficients[3,2])
1190
+ names(se8) <- c('bonusswbindexsen')
1191
+ pvaluebonusswbindex <- list (summary(fit8)$coefficients[3,4])
1192
+ names(pvaluebonusswbindex) <- c('pvaluebonusswbindex')
1193
+ p_value_list <- fit8[5]
1194
+ p_value <- p_value_list[['p.value']]
1195
+
1196
+ p_adj <- p.adjust(p_value, method = "BH")
1197
+ pvalbonus5 <- list(p_adj[3])
1198
+ names(pvalbonus5) <- 'qadjbonusswbindex'
1199
+
1200
+
1201
+
1202
+ fit9 <- lm_robust(data=df, formula = S4_SWBIndex ~ S1_SWBIndex + B+ L+ S, cluster=UserID )
1203
+ estimate9 <- list (fit9$coefficients[['B']])
1204
+ names(estimate9) <- c('bonusswbindexcoef')
1205
+ se9 <- list (summary(fit9)$coefficients[3,2])
1206
+ names(se9) <- c('bonusswbindexse')
1207
+
1208
+
1209
+ fit10 <- lm_robust(data=df, formula = S4_index_well_N ~ S1_index_well_N + B+ L+ S, cluster=UserID )
1210
+ estimate10 <- list (fit10$coefficients[['B']])
1211
+ names(estimate10) <- c('bonusindexwellcoefn')
1212
+ se10 <- list (summary(fit10)$coefficients[3,2])
1213
+ names(se10) <- c('bonusindexwellsen')
1214
+ pvaluebonusindexwell <- list (summary(fit10)$coefficients[3,4])
1215
+ names(pvaluebonusindexwell) <- c('pvaluebonusindexwell')
1216
+ p_value_list <- fit10[5]
1217
+ p_value <- p_value_list[['p.value']]
1218
+
1219
+ p_adj <- p.adjust(p_value, method = "BH")
1220
+ pvalbonus6 <- list(p_adj[3])
1221
+ names(pvalbonus6) <- 'qadjbonusindexwell'
1222
+
1223
+
1224
+ fit11 <- lm_robust(data=df, formula = S4_index_well ~ S1_index_well + B+ L+ S, cluster=UserID )
1225
+ estimate11 <- list (fit11$coefficients[['B']])
1226
+ names(estimate11) <- c('bonusindexwellcoef')
1227
+ se11 <- list (summary(fit11)$coefficients[3,2])
1228
+ names(se11) <- c('bonusindexwellse')
1229
+
1230
+ bonus_effect <- list.merge(estimate, estimate1, estimate2, estimate3, estimate4, estimate5, estimate6, estimate7,
1231
+ estimate8, estimate9, estimate10, estimate11, se, se1, se2, se3, se4, se5, se6, se7, se8, se9, se10, se11,
1232
+ pvaluebonusideal, pvaluebonusaddiction, pvaluebonussms, pvaluebonuslifebetter, pvaluebonusswbindex,
1233
+ pvaluebonusindexwell, pvalbonus, pvalbonus2, pvalbonus3, pvalbonus4, pvalbonus5, pvalbonus6)
1234
+
1235
+ return(bonus_effect)
1236
+
1237
+ }
1238
+
1239
+
1240
+ plot_histogram_predicted <- function(df, filename){
1241
+ df_usage_predict <- df %>%
1242
+ filter(B==0 & L==0) %>%
1243
+ select(PD_P2_UsageFITSBY, PD_P3_UsageFITSBY, PD_P4_UsageFITSBY,
1244
+ S2_PredictUseNext_1, S3_PredictUseNext_1, S4_PredictUseNext_1) %>%
1245
+ mutate(diff2 =PD_P2_UsageFITSBY -S2_PredictUseNext_1,
1246
+ diff3 = PD_P3_UsageFITSBY - S3_PredictUseNext_1,
1247
+ diff4 = PD_P4_UsageFITSBY - S4_PredictUseNext_1) %>%
1248
+ rowwise() %>%
1249
+ mutate(diff = mean(c(diff2, diff3, diff4), na.rm=T))
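+ # diff: each control user's average actual-minus-predicted FITSBY use across periods 2-4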
1250
+
1251
+ a<- ggplot(df_usage_predict, aes(x=diff)) +
1252
+ geom_histogram(aes(y = stat(count) / sum(count)), colour=maroon, fill=maroon) +
1253
+ xlim(c(-150, 150)) +
1254
+ theme_classic() +
1255
+ labs(x = "Actual minus predicted FITSBY use (minutes/day)",
1256
+ y="Fraction of control group") +
1257
+ theme(panel.grid.major.x = element_blank(),
1258
+ panel.grid.major.y = element_line( size=.1, color="lightsteelblue"))
1259
+
1260
+ ggsave(sprintf('output/%s.pdf', filename), plot=a, width=6.5, height=4.5, units="in")
1261
+
1262
+ }
1263
+
1264
+ plot_individual_temptation_effects <- function(df, param_full, filename){
1265
+
1266
+ tau_data <- df %>%
1267
+ select(
1268
+ UserID,
1269
+ w, L, B, S,
1270
+ PD_P1_UsageFITSBY,
1271
+ PD_P2_UsageFITSBY,
1272
+ PD_P3_UsageFITSBY,
1273
+ PD_P4_UsageFITSBY,
1274
+ PD_P5_UsageFITSBY,
1275
+ PD_P2_LimitTightFITSBY
1276
+ )
1277
+
1278
+ fit_2 <- tau_data %>%
1279
+ mutate(tightness=ifelse(L,PD_P2_LimitTightFITSBY, 0)) %>%
1280
+ lm(formula = 'PD_P2_UsageFITSBY ~ PD_P1_UsageFITSBY + L + tightness + B + S',
1281
+ weights = w)
1282
+
1283
+ const_2 <- fit_2$coefficients['L']
1284
+ slope_2 <- fit_2$coefficients['tightness']
1285
+
1286
+
1287
+ fit_3 <- tau_data %>%
1288
+ mutate(tightness=ifelse(L,PD_P2_LimitTightFITSBY, 0)) %>%
1289
+ lm(formula = 'PD_P3_UsageFITSBY ~ PD_P1_UsageFITSBY + L + tightness + B + S',
1290
+ weights = w)
1291
+
1292
+ const_3 <- fit_3$coefficients['L']
1293
+ slope_3 <- fit_3$coefficients['tightness']
1294
+
1295
+
1296
+
1297
+ df <- df %>%
1298
+ mutate(tau_tilde_L = const_3 + slope_3*PD_P3_LimitTightFITSBY,
1299
+ tau_L_2 = const_2 + slope_2 *PD_P2_LimitTightFITSBY) %>%
1300
+ mutate(x_ss_i_data = PD_P1_UsageFITSBY)
1301
+
1302
+ rho <- param_full[['rho']]
1303
+ alpha <- param_full[['alpha']]
1304
+ lambda <- param_full[['lambda']]
1305
+ delta <- param_full[['delta']]
1306
+ eta <- param_full[['eta']]
1307
+ zeta <- param_full[['zeta']]
1308
+ omega <- param_full[['omega']]
1309
+ naivete <- param_full[['naivete']]
1310
+ mispredict <- param_full[['mispredict']]
1311
+
1312
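+ # back out each user's temptation parameter (gamma, and its perceived counterpart gamma_tilde) from the limit-tightness treatment effects and the estimated model parameters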
+ df <- df %>%
1313
+ mutate(num = eta*tau_L_2/omega - (1-alpha)*delta*rho*(((eta-zeta)*tau_tilde_L/omega+zeta*rho*tau_L_2/omega) + (1+lambda)*mispredict*(-eta+(1-alpha)*delta*rho^2*((eta-zeta)*lambda+zeta))),
1314
+ denom = 1 - (1-alpha)*delta*rho*(1+lambda),
1315
+ gamma_spec = num/denom,
1316
+ gamma_tilde_spec = gamma_spec - naivete)
1317
+
1318
+
1319
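+ # predicted steady-state use with and without temptation; the difference, censored to [0, 300] minutes/day, is the individual temptation effect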
+ df <- df %>%
1320
+ mutate(intercept_spec = calculate_intercept_spec(x_ss_i_data, param_full, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta)) %>%
1321
+ mutate(x_ss_spec = calculate_steady_state(param_full, gamma_tilde_spec, gamma_spec, alpha, rho, lambda, mispredict, eta, zeta, intercept_spec),
1322
+ x_ss_zero_un =calculate_steady_state(param_full, 0, 0, alpha, rho, lambda, 0, eta, zeta, intercept_spec),
1323
+ x_ss_zero =ifelse(x_ss_zero_un<0, 0, x_ss_zero_un),
1324
+ delta_x = x_ss_spec - x_ss_zero,
1325
+ delta_x_zero =ifelse(delta_x<0, 0, delta_x),
1326
+ delta_x_zero_3300 = ifelse(delta_x_zero>300, 300, delta_x_zero))
1327
+
1328
+ temptation_effect_below_ten <- nrow(df %>% filter(delta_x_zero_3300<10)) / nrow(df %>% filter(!is.na(delta_x_zero_3300)))
1329
+ temptationeffectbelowten <- signif(temptation_effect_below_ten, digits=2)*100
1330
+
1331
+ temptation_effect_above_100 <- nrow(df %>% filter(delta_x_zero_3300>100)) / nrow(df %>% filter(!is.na(delta_x_zero_3300)))
1332
+ temptationeffectabovehundred <- signif(temptation_effect_above_100, digits=2)*100
1333
+
1334
+
1335
+ estimate <-
1336
+ list(temptationeffectbelowten, temptationeffectabovehundred)
1337
+ names(estimate) <- c('temptationeffectbelowten', 'temptationeffectabovehundred')
1338
+
1339
+ save_nrow(estimate, filename ="individual_temptation_scalars", suffix="")
1340
+
1341
+
1342
+ a<- ggplot(df, aes(x=delta_x_zero_3300)) +
1343
+ geom_histogram(aes(y = stat(count) / sum(count)), colour=maroon, fill=maroon) +
1344
+ xlim(c(0, 300)) +
1345
+ ylim(c(0,0.11)) +
1346
+ theme_classic() +
1347
+ labs(x = "Effect of temptation on FITSBY use (minutes/day)",
1348
+ y="Fraction of sample") +
1349
+ theme(panel.grid.major.x = element_blank(),
1350
+ panel.grid.major.y = element_line( size=.1, color="lightsteelblue"))
1351
+
1352
+ ggsave(sprintf('output/%s.pdf', filename), plot=a, width=6.5, height=4.5, units="in")
1353
+ }
1354
+
1355
+
1356
+
1357
+
1358
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1359
+ # Execute
1360
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1361
+ main <- function(){
1362
+
1363
+ df <- import_data() %>%
1364
+ mutate(addiction_decile = add_deciles(StratAddictionLifeIndex)) %>%
1365
+ mutate(restriction_decile = add_deciles(StratWantRestrictionIndex, step=0.125)) %>%
1366
+ mutate(tightness_decile = add_deciles(PD_P2_LimitTightFITSBY, step=1/6)) %>%
1367
+ mutate(S2_PredictUseBonus = S2_PredictUseInitial * (1 - (S2_PredictUseBonus / 100))) %>%
1368
+ mutate(f_tilde_2_B = (S2_PredictUseBonus + S2_PredictUseInitial) / 2) %>%
1369
+ mutate(behavioral_change_premium = (S2_MPL/num_days) - F_B + (p_B*f_tilde_2_B))
1370
+
1371
+ param <- param_initial
1372
+ param_full <- estimate_model(df, param)
1373
+ plot_individual_temptation_effects(df, param_full, filename="hist_individual_temptation_effects")
1374
+
1375
+
1376
+ bonus_effect <-get_swb_effect_exported_bonus(df)
1377
+ limit_effect <-get_swb_effect_exported_limit(df)
1378
+
1379
+ swb_effects <- list.merge(bonus_effect, limit_effect)
1380
+ save_tex2(swb_effects, filename="swb_effects")
1381
+ save_tex_one(swb_effects, filename="swb_effects_onedigit", suffix="one")
1382
+
1383
+ tau_data <- reshape_tau_data(df)
1384
+ tightness_df <- reshape_tightness(df)
1385
+ mpd_df <- reshape_mispredict(df)
1386
+
1387
+ plot_taus(df, tau_data, tightness_df)
1388
+ plot_valuations(df)
1389
+ plot_mispredict(mpd_df)
1390
+ print('Estimating tau specification')
1391
+ find_tau_spec(df)
1392
+ print('Finished tau specification')
1393
+ plot_treatment_effects(df, filename1="treatment_effects_periods_limit_bonus", filename2="treatment_effects_periods_bonus", filename3="treatment_effects_periods_limit")
1394
+ plot_treatment_effects_interaction(df, filename1 = "interaction_treatment_effects")
1395
+ plot_weekly_effects(df, filename1="treatment_effects_weeks_bonus", filename2 = "treatment_effects_weeks_limit")
1396
+ #get_opt(df)
1397
+ get_addiction_scalar(df)
1398
+ plot_histogram_predicted(df, filename="histogram_predicted_actual_p24")
1399
+
1400
+ df %<>% balance_data(magnitude=3)
1401
+ plot_treatment_effects(df, filename1="treatment_effects_periods_limit_bonus_balanced", filename2="treatment_effects_periods_bonus_balanced", filename3="treatment_effects_periods_limit_balanced")
1402
+ get_addiction_treatment_effect(df, filename="coef_usage_self_control_balance")
1403
+
1404
+ }
1405
+
1406
+ main()
17/replication_package/code/analysis/treatment_effects/code/SurveyValidation.do ADDED
@@ -0,0 +1,136 @@
1
+ // Survey validation: correlations among motivation measures, and prediction accuracy by reward size
2
+
3
+ ***************
4
+ * Environment *
5
+ ***************
6
+
7
+ clear all
8
+ adopath + "input/lib/ado"
9
+ adopath + "input/lib/stata/ado"
10
+
11
+ *********************
12
+ * Utility functions *
13
+ *********************
14
+
15
+ program define_settings
16
+ global DESCRIPTIVE_TAB ///
17
+ collabels(none) nodepvars noobs replace
18
+ end
19
+
20
+ **********************
21
+ * Analysis functions *
22
+ **********************
23
+
24
+ program main
25
+ import_data
26
+ define_settings
27
+
28
+ correlation_motivation
29
+ reg_prediction_reward
30
+ end
31
+
32
+ program import_data
33
+ use "input/final_data_sample.dta", clear
34
+ end
35
+
36
+ program correlation_motivation
37
+ preserve
38
+
39
+ generate tightness=0
40
+ replace tightness=PD_P2_LimitTightFITSBY if (S2_LimitType != 0)
41
+
42
+ foreach v in S1_PhoneUseChange S1_AddictionIndex S1_SMSIndex S1_LifeBetter{
43
+ replace `v' = -`v'
44
+ }
45
+ correlate S2_Benchmark S3_MPLLimit tightness S1_InterestInLimits S1_PhoneUseChange S1_AddictionIndex S1_SMSIndex S1_LifeBetter
46
+ matrix define correlation = r(C)
47
+
48
+ drop *
49
+
50
+ svmat correlation
51
+ qui ds
52
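+ * keep only the lower triangle (and diagonal) of the correlation matrix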
+ foreach i of numlist 1/8 {
53
+ replace correlation`i' = . if _n < `i'
54
+ }
55
+
56
+ dta_to_txt, saving(output/motivation_correlation.txt) title(<tab:motivation_correlation>) nonames replace
57
+ dta_to_txt, saving(output/motivation_correlation_beamer.txt) title(<tab:motivation_correlation_beamer>) nonames replace
58
+
59
+ restore
60
+ end
61
+
62
+ program reg_prediction_reward
63
+ preserve
64
+
65
+ * Make a dummy for whether the participant faced the high prediction reward
66
+ gen PredictRewardHigh = 1 if PredictReward == 5
67
+ replace PredictRewardHigh = 0 if PredictReward == 1
68
+
69
+
70
+ * Reshape data to use predictions from all three surveys
71
+ keep UserID PredictRewardHigh S*_PredictUseNext_1
72
+ local indep UserID PredictRewardHigh
73
+ rename_but, varlist(`indep') prefix(outcome)
74
+ reshape long outcome, i(`indep') j(measure) string
75
+
76
+ gen survey = substr(measure, 2, 1)
77
+ drop measure
78
+
79
+ rename outcome predicted
80
+
81
+ * Save to be merged later
82
+ tempfile temp
83
+ save `temp'
84
+
85
+ restore
86
+
87
+
88
+ preserve
89
+ * Reshape to long format with actual use in those periods
90
+ keep UserID PD_P2_UsageFITSBY PD_P3_UsageFITSBY PD_P4_UsageFITSBY
91
+ local indep UserID
92
+
93
+ rename_but, varlist(`indep') prefix(outcome)
94
+ reshape long outcome, i(`indep') j(measure) string
95
+
96
+ gen survey = substr(measure, 5, 1)
97
+ drop measure
98
+
99
+ rename outcome actual
100
+
101
+ * Merge the actual use back with the predictions
102
+ merge 1:1 UserID survey using `temp'
103
+
104
+ * Run the regressions in question
105
+ reg predicted PredictRewardHigh, robust
106
+ est store predicted
107
+
108
+ reg actual PredictRewardHigh, robust
109
+ est store actual
110
+
111
+ gen pred_min_actual = predicted - actual
112
+ reg pred_min_actual PredictRewardHigh, robust
113
+ est store pred_min_actual
114
+
115
+ gen abs_pred_min_actual = abs(predicted - actual)
116
+ reg abs_pred_min_actual PredictRewardHigh, robust
117
+ est store abs_pred_min_actual
118
+
119
+ * Save the regressions as a table
120
+ esttab predicted actual pred_min_actual abs_pred_min_actual ///
121
+ using "output/high_reward_reg.tex", ///
122
+ mtitle("\shortstack{Predicted\\use}" ///
123
+ "\shortstack{Actual\\use}" ///
124
+ "\shortstack{Predicted -\\actual use}" ///
125
+ "\shortstack{Absolute value of\\predicted - actual\\use}") ///
126
+ coeflabels(PredictRewardHigh "High prediction reward" ///
127
+ _cons "Constant") ///
128
+ $DESCRIPTIVE_TAB se nostar nonotes
129
+ restore
130
+ end
131
+
132
+ ***********
133
+ * Execute *
134
+ ***********
135
+
136
+ main
17/replication_package/code/analysis/treatment_effects/input.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e99d601b158b198f1db1ad6c634f9c4011573ec45848fe7d4e716fc3e26cac3
3
+ size 914
17/replication_package/code/analysis/treatment_effects/make.py ADDED
@@ -0,0 +1,75 @@
1
+ ###################
2
+ ### ENVIRONMENT ###
3
+ ###################
4
+ import git
5
+ import imp
6
+ import os
7
+
8
+ ### SET DEFAULT PATHS
9
+ ROOT = '../..'
10
+
11
+ PATHS = {
12
+ 'root' : ROOT,
13
+ 'lib' : os.path.join(ROOT, 'lib'),
14
+ 'config' : os.path.join(ROOT, 'config.yaml'),
15
+ 'config_user' : os.path.join(ROOT, 'config_user.yaml'),
16
+ 'input_dir' : 'input',
17
+ 'external_dir' : 'external',
18
+ 'output_dir' : 'output',
19
+ 'output_local_dir' : 'output_local',
20
+ 'makelog' : 'log/make.log',
21
+ 'output_statslog' : 'log/output_stats.log',
22
+ 'source_maplog' : 'log/source_map.log',
23
+ 'source_statslog' : 'log/source_stats.log',
24
+ }
25
+
26
+ ### LOAD GSLAB MAKE
27
+ f, path, desc = imp.find_module('gslab_make', [PATHS['lib']])
28
+ gs = imp.load_module('gslab_make', f, path, desc)
29
+
30
+ ### LOAD CONFIG USER
31
+ PATHS = gs.update_paths(PATHS)
32
+ gs.update_executables(PATHS)
33
+
34
+ ############
35
+ ### MAKE ###
36
+ ############
37
+
38
+ ### START MAKE
39
+ gs.remove_dir(['input', 'external'])
40
+ gs.clear_dir(['output', 'log', 'temp'])
41
+ gs.start_makelog(PATHS)
42
+
43
+ ### GET INPUT FILES
44
+ inputs = gs.link_inputs(PATHS, ['input.txt'])
45
+ # gs.write_source_logs(PATHS, inputs + externals)
46
+ # gs.get_modified_sources(PATHS, inputs + externals)
47
+
48
+ ### RUN SCRIPTS
49
+ """
50
+ Critical
51
+ --------
52
+ Many of the Stata analysis scripts recode variables using
53
+ the `recode` command. Double-check all `recode` commands
54
+ to confirm recoding is correct, especially when reusing
55
+ code for a different experiment version.
56
+ """
57
+
58
+ gs.run_stata(PATHS, program = 'code/CommitmentResponse.do')
59
+ gs.run_stata(PATHS, program = 'code/HabitFormation.do')
60
+ gs.run_stata(PATHS, program = 'code/Heterogeneity.do')
61
+ gs.run_stata(PATHS, program = 'code/SurveyValidation.do')
62
+ gs.run_stata(PATHS, program = 'code/FDRTable.do')
63
+ gs.run_stata(PATHS, program = 'code/HeterogeneityInstrumental.do')
64
+ gs.run_stata(PATHS, program = 'code/Beliefs.do')
65
+
66
+ gs.run_r(PATHS, program = 'code/ModelHeterogeneity.R')
67
+
68
+ ### LOG OUTPUTS
69
+ gs.log_files_in_output(PATHS)
70
+
71
+ ### CHECK FILE SIZES
72
+ # gs.check_module_size(PATHS)
73
+
74
+ ### END MAKE
75
+ gs.end_makelog(PATHS)
17/replication_package/code/codebook.xlsx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7c741990bd21eb9bab76643df657fff0f7bfc2c8500f64325178d371127a16e
3
+ size 63101
17/replication_package/code/config.yaml ADDED
@@ -0,0 +1,122 @@
1
+
2
+ #####################################################################
3
+ # Is git LFS required to run this repository?
4
+ #
5
+ # This should normally remain Yes, as it prevents inadvertently
6
+ # committing large data files
7
+ #####################################################################
8
+ git_lfs_required: Yes
9
+
10
+ #####################################################################
11
+ # Other required software
12
+ #####################################################################
13
+ gslab_make_required: Yes
14
+
15
+ software_required:
16
+ r: No
17
+ stata: No
18
+ lyx: Yes
19
+ matlab: No
20
+ latex: No
21
+
22
+ #####################################################################
23
+ # Maximum allowed file sizes
24
+ #####################################################################
25
+ max_file_sizes:
26
+ file_MB_limit_lfs: 100 # Soft limit on file size (w/ LFS)
27
+ total_MB_limit_lfs: 500 # Soft limit on total size (w/ LFS)
28
+ file_MB_limit: 0.5 # Soft limit on file size (w/o LFS)
29
+ total_MB_limit: 100 # Soft limit on total size (w/o LFS)
30
+
31
+ metadata:
32
+ payment:
33
+ bonus: 50
34
+ fixed_rate: 50
35
+ strata: i.Stratifier
36
+
37
+ #####################################################################
38
+ # Repository metadata
39
+ #####################################################################
40
+
41
+ # Experiment name (for example, 'Pilot#' or 'Temptation'; if set to 'Scratch', the pipeline will process dummy data).
42
+ experiment_name: "Temptation"
43
+
44
+ # Survey Dates (Note that the 'Phase{#}Start' surveys are just fillers for the post study phases)
45
+ surveys:
46
+ Recruitment:
47
+ Start: !!timestamp "2020-03-22 12:00:00"
48
+ End: !!timestamp "2020-04-10 10:45:00"
49
+ Baseline:
50
+ Start: !!timestamp "2020-04-12 08:00:00"
51
+ End: !!timestamp "2020-04-13 16:00:00"
52
+ Midline:
53
+ Start: !!timestamp "2020-05-03 0:00:00"
54
+ End: !!timestamp "2020-05-11 23:59:00"
55
+ Endline1:
56
+ Start: !!timestamp "2020-05-24 08:00:00"
57
+ End: !!timestamp "2020-05-31 08:00:00"
58
+ Endline2:
59
+ Start: !!timestamp "2020-06-14 08:00:00"
60
+ End: !!timestamp "2020-06-22 17:00:00"
61
+ Phase5Start:
62
+ Start: !!timestamp "2020-07-05 08:00:00"
63
+ End: !!timestamp "2020-07-05 23:59:00"
64
+ Phase6Start:
65
+ Start: !!timestamp "2020-07-26 08:00:00"
66
+ End: !!timestamp "2020-07-26 23:59:00"
67
+ Phase7Start:
68
+ Start: !!timestamp "2020-08-16 08:00:00"
69
+ End: !!timestamp "2020-08-16 23:59:00"
70
+ Phase8Start:
71
+ Start: !!timestamp "2020-09-06 08:00:00"
72
+ End: !!timestamp "2020-09-06 23:59:00"
73
+ Phase9Start:
74
+ Start: !!timestamp "2020-09-27 08:00:00"
75
+ End: !!timestamp "2020-09-27 23:59:00"
76
+ Phase10Start:
77
+ Start: !!timestamp "2020-10-18 08:00:00"
78
+ End: !!timestamp "2020-10-18 23:59:00"
79
+ Phase11Start:
80
+ Start: !!timestamp "2020-11-08 08:00:00"
81
+ End: !!timestamp "2020-11-08 23:59:00"
82
+ Enrollment:
83
+ Start: !!timestamp "2020-04-09 9:00:00"
84
+ End: !!timestamp "2020-04-11 12:00:00"
85
+ WeeklyText:
86
+ Start: !!timestamp "2020-03-25 00:00:00"
87
+ End: !!timestamp "2020-03-30 23:59:00"
88
+ PDBug:
89
+ Start: !!timestamp "2020-04-24 18:40:00"
90
+ End: !!timestamp "2020-04-28 23:59:00"
91
+ TextSurvey1:
92
+ Start: !!timestamp "2020-04-12 08:00:00"
93
+ End: !!timestamp "2020-06-17 00:00:00"
94
+ TextSurvey2:
95
+ Start: !!timestamp "2020-04-12 08:00:00"
96
+ End: !!timestamp "2020-06-17 00:00:00"
97
+ TextSurvey3:
98
+ Start: !!timestamp "2020-04-12 08:00:00"
99
+ End: !!timestamp "2020-06-17 00:00:00"
100
+ TextSurvey4:
101
+ Start: !!timestamp "2020-04-12 08:00:00"
102
+ End: !!timestamp "2020-06-17 00:00:00"
103
+ TextSurvey5:
104
+ Start: !!timestamp "2020-04-12 08:00:00"
105
+ End: !!timestamp "2020-06-17 00:00:00"
106
+ TextSurvey6:
107
+ Start: !!timestamp "2020-04-12 08:00:00"
108
+ End: !!timestamp "2020-06-17 00:00:00"
109
+ TextSurvey7:
110
+ Start: !!timestamp "2020-04-12 08:00:00"
111
+ End: !!timestamp "2020-06-17 00:00:00"
112
+ TextSurvey8:
113
+ Start: !!timestamp "2020-04-12 08:00:00"
114
+ End: !!timestamp "2020-06-17 00:00:00"
115
+ TextSurvey9:
116
+ Start: !!timestamp "2020-04-12 20:00:00"
117
+ End: !!timestamp "2020-06-17 00:00:00"
118
+
119
+ # Date Range of Data Used in Study (range of data we pull PD data)
120
+ date_range:
121
+ first_pull: !!timestamp "2020-03-21 00:00:00"
122
+ last_pull: !!timestamp "2020-11-15 00:00:00"
17/replication_package/code/config_user.yaml ADDED
@@ -0,0 +1,67 @@
1
+ #####################################################################
2
+ # Make a copy of this file called config_user.yaml and place
3
+ # it at the root level of the repository
4
+ #
5
+ # This file holds local settings specific to your computing
6
+ # environment. It should not be committed to the repository.
7
+ #####################################################################
8
+
9
+ #####################################################################
10
+ # External dependencies
11
+ #
12
+ # This section defines resources used by the code that are external
13
+ # to the repository. Code should never reference any files external
14
+ # to the repository except via these paths.
15
+ #
16
+ # Each external resource is defined by a key with a value equal
17
+ # to the local path to the resource. These
18
+ # keys should be short descriptive names that will then be used
19
+ # to refer to these resources in code. E.g., "raw_data",
20
+ # "my_other_repo", etc. Defaults can optionally be placed in
21
+ # brackets after the colon
22
+ #
23
+ # Replace the paths below with correct local paths on your machine
24
+ #
25
+ #####################################################################
26
+ external:
27
+ dropbox: /project #Point to PhoneAddiction Dropbox Root
28
+
29
+ #####################################################################
30
+ # Local settings
31
+ #
32
+ # This section defines parameters specific to each user's local
33
+ # environment.
34
+ #
35
+ # Examples include names of executables, usernames, etc. These
36
+ # variables should NOT be used to store passwords.
37
+ #
38
+ # Each parameter is defined by a key with a default value. These
39
+ # keys should be short descriptive names that will then be used
40
+ # to refer to the parameters in code.
41
+ #
42
+ #####################################################################
43
+ local:
44
+
45
+ # Executable names
46
+ executables:
47
+
48
+ python: python
49
+ r: Rscript
50
+ stata: stata-mp
51
+ matlab: matlab
52
+ lyx: lyx
53
+ latex: latex
54
+
55
+ # Data Run
56
+ #if true, data/run will start by reading in the latest raw master data file, instead of processing raw phone dashboard data
57
+ skip_building: True
58
+
59
+ # if true, will process new data in parallel. only relevant if skip_building == False
60
+ parallel: False
61
+ cores: 4
62
+
63
+ #if true, will use all data in DataTest and ConfidentialTest
64
+ test: False
65
+
66
+ #if true, stdout will write to data/log/mb_log.log instead of to terminal
67
+ log: False
17/replication_package/code/data/README.md ADDED
@@ -0,0 +1,96 @@
1
+ # data
2
+ This module contains all the code that prepares the data for analysis and produces survey-management deliverables (e.g., contact lists). The datasets needed to run this module rely on confidential data and were therefore omitted from this replication archive.
3
+
4
+ Below we detail how, given the raw confidential data, this module constructs the main datasets.
5
+
6
+ #### 1. Pipeline overview
7
+ We run the whole data pipeline by calling data/make.py, which runs the three main submodules below; a minimal sketch of this orchestration follows the list. Note:
8
+ many classes and functions required by this pipeline are located in lib/data_helpers.
9
+
10
+ 1. source/build_master
11
+
12
+ a. Purpose: a builder object (in builder.py) will pull all the raw data, detect gaming, clean each individual data file, and merge them
13
+ at the user level and the user_day_app level.
14
+
15
+ b. Input:
16
+ i. Raw Survey
17
+ ii. Phone Dashboard
18
+
19
+ c. Output:
20
+ i. master_raw_user.pickle, a raw master file at the user level containing survey data and PD data for each phase
21
+ ii. master_user_day_app.pickle, a clean master file at the user-day-app level containing use, limit, and snooze activity
22
+
23
+ 2. source/clean_master
24
+
25
+ a. Purpose: a cleaner object (in cleaner.py) will clean raw_master_user.pickle, assign treatments, calculate earnings,
26
+ and create outcome variables.
27
+
28
+ b. Input: raw_master_user.pickle
29
+
30
+ c. Output: clean_master_user.pickle
31
+
32
+ 3. source/exporters
33
+
34
+ a. Purpose: creates contact lists, Tango cards, Phone Dashboard treatment configs, and analysis files ready for Stata.
35
+
36
+ b. Input: master_clean_user.pickle and master_user_day_app.pickle
37
+
38
+ c. Output:
39
+ i. Contact Lists, Tango Cards, Phone Dashboard, Treatment Configs, and other data with identifiable info will output into /Dropbox/PhoneAddiction/Confidential
40
+ ii. pre_analysis_user.csv and pre_analysis_user_app_day.csv in /Dropbox/PhoneAddiction/Data/{experiment_name}/Intermediate
41
+
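+ A minimal sketch of this orchestration (the stub names below are illustrative placeholders for the submodules described above, not the package's actual functions; the real entry point is data/make.py):
+
+ ```python
+ # Sketch only: mirrors the build -> clean -> export flow described above.
+
+ def build_master():
+     """source/build_master: pull the raw survey and Phone Dashboard data,
+     detect gaming, and merge to the user and user-day-app levels."""
+     return {}  # stands in for master_raw_user.pickle
+
+ def clean_master(raw_master_user):
+     """source/clean_master: clean the raw master, assign treatments,
+     calculate earnings, and create outcome variables."""
+     return dict(raw_master_user)  # stands in for clean_master_user.pickle
+
+ def export_deliverables(clean_master_user):
+     """source/exporters: contact lists, Tango cards, PD treatment configs,
+     and Stata-ready analysis files."""
+     pass
+
+ def run_pipeline():
+     export_deliverables(clean_master(build_master()))
+ ```
+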
42
+ ## 2. Configurations:
43
+ - root/config_user.yaml: configurations that alter how the pipeline is run. Read through those in the yaml file, but to highlight:
44
+ 1. skip_building: if true, the data run will start by reading in the latest raw master data file instead of processing the raw Phone Dashboard data. You should not attempt to process the raw PD data unless you're on Sherlock or another HPC.
45
+ 2. test: if set to true, this runs nearly the full pipeline, but with dummy data. Data is saved in DataTest and ConfidentialTest. This is helpful when testing something in the build class.
46
+
47
+ - root/config.yaml: sets significant experiment dates (survey dates, and date range of PD data pull)
48
+
49
+ - root/lib/experiment_specs: contains detailed specs for the data pipeline. Check out the README in that folder for specifics.
50
+
51
+ ## 3. Raw Phone Dashboard Data Exports
52
+ - All PD data arrives in the PhonedashboardPort Dropbox folder. These files are processed by functions
53
+ in data/source/build_master/builder.py and helper functions in lib/data_helpers.
54
+
55
+ ## 4. Snooze Events
+ The blocking and snooze event flow is as follows (a short code sketch of the same logic appears after the list):
56
+ 1. PD receives usage ping from ForegroundApplication generator.
57
+
58
+ 2. If app usage is within X minutes of budget being exhausted:
59
+
60
+ 2.a: PD does not block app, but launches warning activity with the package and metadata.
61
+
62
+ 2.b: Warning activity throws up warning dialog (event = app-block-warning).
63
+
64
+ 2.c: User closes / cancels dialog (event = closed-warning).
65
+
66
+ 3. If app usage is past budget AND has been snoozed, and snooze delay has not elapsed:
67
+
68
+ 3.a: PD blocks the app and returns the system to the home screen (event = blocked_app).
69
+
70
+ 3.b: PD shows dialog letting user know that delay hasn’t elapsed (event = app-blocked-no-snooze).
71
+
72
+ 3.c: User closes/cancels dialog (event = app-blocked-no-snooze-closed).
73
+
74
+ 4. If app usage is past budget AND the app has not been snoozed:
75
+
76
+ 4.a: PD blocks the app and returns the system to the home screen (event = blocked_app).
77
+
78
+ 4.b: PD shows a dialog letting the user know the budget is exhausted (event = app-blocked-no-snooze).
79
+
80
+ 4.c: If snooze is NOT enabled for the user:
81
+
82
+ 4.c.1: PD shows a dialog that the user cannot use the app until tomorrow (event = app-blocked-no-snooze).
83
+
84
+ 4.c.2: User closes / cancels the dialog (event = app-blocked-no-snooze-closed).
85
+
86
+ 4.d: If snooze IS enabled for the user:
87
+
88
+ 4.d.1: PD shows a dialog letting the user know the budget is up (event = app-blocked-can-snooze).
89
+
90
+ 4.d.2: User closes / cancels the dialog (event = skipped-snooze).
91
+
92
+ 4.d.3: If the user decides to snooze, PD shows a dialog asking about the snooze amount (no event generated).
93
+
94
+ 4.d.3.a: User closes / cancels the snooze dialog without setting a limit (event = cancelled-snooze).
95
+
96
+ 4.d.3.b: User sets the snooze amount (event = snoozed-app-limit).
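+
+ A minimal Python sketch of the decision logic above (the event strings match the list; the function name, arguments, and the warning threshold are illustrative assumptions, not Phone Dashboard's actual implementation):
+
+ ```python
+ def handle_usage_ping(used_min, budget_min, snoozed, delay_elapsed,
+                       snooze_enabled, warn_within_min=5):
+     """Return the PD events triggered by one usage ping (steps 2-4 above)."""
+     if used_min < budget_min:                     # step 2: approaching the budget
+         if budget_min - used_min <= warn_within_min:
+             return ["app-block-warning"]          # 2.b; user may close it (closed-warning)
+         return []
+     if snoozed:                                   # step 3: already snoozed
+         if not delay_elapsed:
+             return ["blocked_app", "app-blocked-no-snooze"]   # 3.a-3.b
+         return []                                 # snooze still in effect (case not covered above)
+     if snooze_enabled:                            # step 4.d: offer a snooze
+         return ["blocked_app", "app-blocked-can-snooze"]
+     return ["blocked_app", "app-blocked-no-snooze"]           # step 4.c
+ ```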
17/replication_package/code/data/__init__.py ADDED
File without changes
17/replication_package/code/data/external.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8534419da84c6a8c9f902d366dd6c964c2ec49c804730cc4ad333a7d7f05a39
3
+ size 1135
17/replication_package/code/data/input.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47d958523f79a58631b695d9e13762414d22ce0055ae9ac8b9f6ad63c17026c1
3
+ size 676
17/replication_package/code/data/make.py ADDED
@@ -0,0 +1,68 @@
1
+ ###################
2
+ ### ENVIRONMENT ###
3
+ ###################
4
+ import git
5
+ import imp
6
+ import os
7
+ import yaml
8
+
9
+ ### SET DEFAULT PATHS
10
+ ROOT = git.Repo('.', search_parent_directories = True).working_tree_dir
11
+
12
+ PATHS = {
13
+ 'root' : ROOT,
14
+ 'lib' : os.path.join(ROOT, 'lib'),
15
+ 'config' : os.path.join(ROOT, 'config.yaml'),
16
+ 'config_user' : os.path.join(ROOT, 'config_user.yaml'),
17
+ 'input_dir' : 'input',
18
+ 'external_dir' : 'external',
19
+ 'output_dir' : 'output',
20
+ 'output_local_dir' : 'output_local',
21
+ 'makelog' : 'log/make.log',
22
+ 'output_statslog' : 'log/output_stats.log',
23
+ 'source_maplog' : 'log/source_map.log',
24
+ 'source_statslog' : 'log/source_stats.log'
25
+ }
26
+
27
+ ### ADD EXPERIMENT NAME TO PATH
28
+ with open(PATHS['config'], 'r') as stream:
29
+ config = yaml.safe_load(stream)
30
+
31
+ PATHS["experiment_name"] = config['experiment_name']
32
+
33
+ ### LOAD GSLAB MAKE
34
+ f, path, desc = imp.find_module('gslab_make', [PATHS['lib']])
35
+ gs = imp.load_module('gslab_make', f, path, desc)
36
+
37
+ ### LOAD CONFIG USER
38
+ PATHS = gs.update_paths(PATHS)
39
+ gs.update_executables(PATHS)
40
+
41
+ ############
42
+ ### MAKE ###
43
+ ############
44
+
45
+ ### START MAKE
46
+ gs.remove_dir(['input', 'external'])
47
+ gs.clear_dir(['output', 'log'])
48
+ gs.start_makelog(PATHS)
49
+
50
+ ### GET INPUT FILES
51
+ inputs = gs.link_inputs(PATHS, ['input.txt'])
52
+ externals = gs.link_externals(PATHS, ['external.txt'])
53
+
54
+ gs.write_source_logs(PATHS, inputs + externals)
55
+ gs.get_modified_sources(PATHS, inputs + externals)
56
+
57
+ ### RUN SCRIPTS
58
+ gs.run_python(PATHS, program = 'source/run.py')
59
+ gs.run_stata(PATHS, program = 'source/prep_stata.do')
60
+
61
+ ### LOG OUTPUTS
62
+ gs.log_files_in_output(PATHS)
63
+
64
+ ### CHECK FILE SIZES
65
+ gs.check_module_size(PATHS)
66
+
67
+ ### END MAKE
68
+ gs.end_makelog(PATHS)
17/replication_package/code/data/source/__init__.py ADDED
File without changes
17/replication_package/code/data/source/build_master/__init__.py ADDED
File without changes
17/replication_package/code/data/source/build_master/builder.py ADDED
@@ -0,0 +1,328 @@
1
+ from datetime import datetime,timedelta
2
+ import pandas as pd
3
+ import os
4
+ import sys
5
+ import git
6
+ from pympler.tracker import SummaryTracker
7
+
8
+ #importing modules from root of data
9
+ root = git.Repo('.', search_parent_directories = True).working_tree_dir
10
+ sys.path.append(root)
11
+ os.chdir(os.path.join(root))
12
+
13
+ from lib.data_helpers.pull_events import PullEvents
14
+ from lib.utilities import serialize
15
+ from data.source.build_master.pullers.pull_events_use import PullEventsUse
16
+ from data.source.build_master.pullers.pull_events_alt import PullEventsAlt
17
+
18
+
19
+ from lib.data_helpers.clean_events import CleanEvents
20
+ from data.source.build_master.cleaners.clean_surveys import CleanSurveys
21
+ from data.source.build_master.cleaners.clean_events_use import CleanEventsUse
22
+ from data.source.build_master.cleaners.clean_events_status import CleanEventsStatus
23
+ from data.source.build_master.cleaners.clean_events_budget import CleanEventsBudget
24
+ from data.source.build_master.cleaners.clean_events_snooze_delays import CleanEventsSnoozeDelays
25
+ from data.source.build_master.cleaners.clean_events_snooze import CleanEventsSnooze
26
+ from data.source.build_master.cleaners.clean_events_alt import CleanEventsAlt
27
+
28
+
29
+
30
+
31
+ from lib.data_helpers.gaming import Gaming
32
+ from data.source.build_master.master_raw_user import MasterRawUser
33
+ from data.source.build_master.master_raw_user_day_app import MasterRawUserDayApp
34
+
35
+ from lib.experiment_specs import study_config
36
+
37
+ """
38
+ Builds the raw master data: pulls raw survey and Phone Dashboard exports, cleans each source, and merges the results at the user and user-day-app levels.
39
+ """
40
+ class Builder():
41
+
42
+ @staticmethod
43
+ def build_master():
44
+ tracker = SummaryTracker()
45
+
46
+ # print(f"\n Clean Survey Data {datetime.now()}")
47
+ # clean_surveys = CleanSurveys.clean_all_surveys()
48
+
49
+ # print(f"\nInitializing Master DF and add survey data {datetime.now()}")
50
+ # raw_user = MasterRawUser(initial_survey_df= clean_surveys[study_config.initial_master_survey])
51
+ # raw_user.add(clean_surveys)
52
+ # del clean_surveys
53
+
54
+ # print(f"\nCleaning Traditional Use and DetectGaming {datetime.now()}")
55
+ # trad_use_phase, trad_use_hour = Builder._build_pd_use()
56
+
57
+ # game_df = Gaming.process_gaming(error_margin=1,
58
+ # hour_use=trad_use_hour,
59
+ # raw_user_df=raw_user.raw_master_df)
60
+ # raw_user.add({"Game": game_df})
61
+
62
+ # tracker.print_diff()
63
+ # del [trad_use_phase, game_df]
64
+ # tracker.print_diff()
65
+
66
+ # if datetime.now() > study_config.surveys["Midline"]["Start"]:
67
+ # print(f"\nCleaning Limit Data {datetime.now()}")
68
+ # pd_snooze = Builder._build_pd_snooze()
69
+ # budget_phase, pd_budget = Builder._build_pd_budget()
70
+ # try:
71
+ # Builder._build_pd_snooze_delay()
72
+ # except:
73
+ # print("couldn't process snooze delay data")
74
+
75
+ # raw_user.add({"PDBudget": budget_phase})
76
+ # else:
77
+ # pd_budget = pd.DataFrame()
78
+ # pd_snooze = pd.DataFrame()
79
+
80
+ print(f"\nCleaning Traditional Use Individual {datetime.now()}")
81
+ Builder._build_pd_use_indiv()
82
+
83
+ # print(f"\n Alternative and Status Data {datetime.now()}")
84
+ # alt_use_hour, alt_use_phase = Builder._build_pd_alt(trad_use_hour)
85
+ # raw_user.add({"AltPDUse": alt_use_phase})
86
+
87
+ # clean_status, pd_latest = Builder._build_pd_status(raw_user.raw_master_df,alt_use_hour)
88
+ # raw_user.add({"LatestPD": pd_latest})
89
+ # del [alt_use_phase, pd_latest]
90
+
91
+ # print(f"\n Serialize user level data before building user-app-day data")
92
+ # config_user_dict = serialize.open_yaml("config_user.yaml")
93
+ # if config_user_dict['local']['test'] == False:
94
+ # serialize.save_pickle(raw_user.raw_master_df,
95
+ # os.path.join("data", "external", "intermediate", "MasterIntermediateUser"))
96
+
97
+ # print(f"\n Create UserXAppXDate Level data {datetime.now()}")
98
+ # MasterRawUserDayApp.build(alt_use_hour,pd_budget,pd_snooze,clean_status)
99
+
100
+ # tracker.print_diff()
101
+ # del [pd_budget,pd_snooze,alt_use_hour]
102
+
103
+ # print(f"\n Recover Old Install Data")
104
+ # PullEventsAlt.recover_install_data()
105
+ # return raw_user.raw_master_df
106
+
107
+
108
+ @staticmethod
109
+ def _build_pd_use():
110
+ pd_use_puller = PullEvents(source="PhoneDashboard",
111
+ keyword="Use",
112
+ scratch=False,
113
+ test=False,
114
+ time_cols=["Created", "Recorded"],
115
+ raw_timezone="Local",
116
+ appcode_col='Source',
117
+ identifying_cols=["AppCode", "ForegroundApp", "ScreenActive",
118
+ "CreatedDatetimeHour"],
119
+ sort_cols= ["CreatedDatetimeHour","RecordedDatetimeHour"],
120
+ drop_cols= ["PlayStoreCategory","UploadLag"],
121
+ cat_cols = ["ForegroundApp"],
122
+ compress_type="txt",
123
+ processing_func=PullEventsUse.process_raw_use)
124
+
125
+ raw_hour_use = pd_use_puller.update_data()
126
+
127
+ use_cleaner = CleanEvents(source="PhoneDashboard", keyword="Use")
128
+ use_phase, use_hour = use_cleaner.clean_events(raw_event_df=raw_hour_use,
129
+ date_col="CreatedDate",
130
+ cleaner=CleanEventsUse(use_type="Traditional"))
131
+
132
+ CleanEventsUse.get_timezones(use_hour, "CreatedDatetimeHour", "CreatedEasternDatetimeHour")
133
+
134
+
135
+ return use_phase, use_hour
136
+
137
+ @staticmethod
138
+ def _build_pd_use_indiv():
139
+ pd_use_puller = PullEvents(source="PhoneDashboard",
140
+ keyword="UseIndiv",
141
+ scratch=True,
142
+ test=False,
143
+ time_cols=["Created", "Recorded"],
144
+ raw_timezone="Local",
145
+ appcode_col='Source',
146
+ identifying_cols=["AppCode", "ForegroundApp", "StartTime", "UseMinutes"],
147
+ sort_cols= ["StartTime"],
148
+ drop_cols= ["PlayStoreCategory","UploadLag"],
149
+ cat_cols = ["ForegroundApp"],
150
+ compress_type="txt",
151
+ processing_func=PullEventsUse.process_raw_use_indiv)
152
+
153
+ raw_hour_use = pd_use_puller.update_data()
154
+
155
+ # use_cleaner = CleanEvents(source="PhoneDashboard", keyword="Use")
156
+ # use_phase, use_hour = use_cleaner.clean_events(raw_event_df=raw_hour_use,
157
+ # date_col="CreatedDate",
158
+ # cleaner=CleanEventsUse(use_type="Traditional"))
159
+
160
+ # CleanEventsUse.get_timezones(use_hour, "CreatedDatetimeHour", "CreatedEasternDatetimeHour")
161
+
162
+
163
+ @staticmethod
164
+ def _build_pd_status(raw_master: pd.DataFrame, alt_use_hour: pd.DataFrame):
165
+ pd_use_puller = PullEvents(source="PhoneDashboard",
166
+ keyword="Status",
167
+ scratch=False,
168
+ test=False,
169
+ time_cols=["LastUpload"],
170
+ raw_timezone="Local",
171
+ appcode_col='Participant',
172
+ identifying_cols=["AppCode", "Group", "Blocker",
173
+ "LastUpload", "AppVersion","PlatformVersion","PhoneModel","OptedOut"],
174
+ sort_cols = ["LastUpload"],
175
+ drop_cols = ['PhaseUseBrowser(ms)',
176
+ 'PhaseUseFB(ms)',
177
+ 'PhaseUseIG(ms)',
178
+ 'PhaseUseOverall(ms)',
179
+ 'PhaseUseSnap(ms)',
180
+ 'PhaseUseYoutube(ms)',"AsOf"],
181
+ cat_cols = [],
182
+ compress_type="txt",)
183
+
184
+ raw_status = pd_use_puller.update_data()
185
+ raw_status["LastUploadDate"] = raw_status["LastUpload"].apply(lambda x: x.date())
186
+ use_cleaner = CleanEvents(source="PhoneDashboard", keyword="Status")
187
+ clean_status = use_cleaner.clean_events(raw_event_df=raw_status,
188
+ date_col="LastUploadDate",
189
+ cleaner=CleanEventsStatus(),
190
+ phase_data=False)
191
+
192
+ pd_latest = CleanEventsStatus.get_latest_pd_health(clean_status, raw_master, alt_use_hour)
193
+ return clean_status, pd_latest
194
+
195
+ @staticmethod
196
+ def _build_pd_alt(clean_trad_use_hour):
197
+ alt_json_reader = PullEventsAlt()
198
+ pd_alt_puller = PullEvents(source="PhoneDashboard",
199
+ keyword="Alternative",
200
+ scratch=False,
201
+ test=False,
202
+ time_cols=["Created"],
203
+ raw_timezone="Local",
204
+ appcode_col='AppCode',
205
+ identifying_cols=["AppCode", "ForegroundApp", "CreatedDatetimeHour"],
206
+ sort_cols = ["Observed","CreatedDatetimeHour"],
207
+ drop_cols = ["Com.AudaciousSoftware.PhoneDashboard.AppTimeBudget", "Timezone",
208
+ "CreatedDatetime","CreatedEasternDatetime","Label", "CreatedDate",
209
+ "PlayStoreCategory","DaysObserved","Index","ZipFolder","CreatedEasternMinusLocalHours"],
210
+ cat_cols = ["ForegroundApp"],
211
+ compress_type="folder",
212
+ processing_func=alt_json_reader.process_raw_use,
213
+ file_reader=alt_json_reader.read_alt)
214
+
215
+ # This function will read in and update all types of alternative data, will only return the use data
216
+ # and will serialize all other data
217
+ raw_alt_use_hour = pd_alt_puller.update_data()
218
+ try:
219
+ combined_raw_alt_use_hour = PullEventsAlt.combine_trad_alt(raw_alt_use_hour,clean_trad_use_hour)
220
+ except:
221
+ print("could not combine trad and alt")
222
+ combined_raw_alt_use_hour = raw_alt_use_hour.copy()
223
+
224
+ use_cleaner = CleanEvents(source="PhoneDashboard", keyword="Alternative")
225
+ use_phase, use_hour = use_cleaner.clean_events(raw_event_df=combined_raw_alt_use_hour,
226
+ date_col="CreatedDate",
227
+ cleaner=CleanEventsUse(use_type="Alternative"))
228
+
229
+ config_user_dict = serialize.open_yaml("config_user.yaml")
230
+ if config_user_dict['local']['test']== False:
231
+ try:
232
+ print(f"\n Clean Alt Install data events {datetime.now()}")
233
+ CleanEventsAlt.process_appcode_files(
234
+ input_folder = os.path.join("data", "external", "input", "PhoneDashboard", "RawAltInstall"),
235
+ output_file = os.path.join("data", "external", "intermediate", "PhoneDashboard", "AltInstall"),
236
+ cleaning_function= CleanEventsAlt.clean_install
237
+ )
238
+ except:
239
+ print("could not aggregate install data")
240
+ return use_hour, use_phase
241
+
242
+ @staticmethod
243
+ def _build_pd_budget():
244
+ """processes the limit setting data"""
245
+ pd_budget_puller = PullEvents(source="PhoneDashboard",
246
+ keyword="Budget",
247
+ scratch=False,
248
+ test=False,
249
+ time_cols=["Updated","EffectiveDate"],
250
+ raw_timezone="Local",
251
+ appcode_col="Source",
252
+ identifying_cols=["AppCode", "App", "Updated", "EffectiveDate"],
253
+ sort_cols=["Updated"],
254
+ drop_cols = [],
255
+ cat_cols = [],
256
+ compress_type="txt")
257
+
258
+ pd_budget = pd_budget_puller.update_data()
259
+
260
+ budget_cleaner = CleanEvents(source="PhoneDashboard", keyword="Budget")
261
+ clean_budget = budget_cleaner.clean_events(raw_event_df=pd_budget,
262
+ date_col="EffectiveDate",
263
+ cleaner=CleanEventsBudget(),
264
+ phase_data = False)
265
+
266
+ budget_sum = CleanEventsBudget.get_latest_budget_data(clean_budget)
267
+
268
+ return budget_sum, clean_budget
269
+
270
+ @staticmethod
271
+ def _build_pd_snooze_delay():
272
+ """process the custom snooze data (post study functionality)"""
273
+ pd_snooze_delay_puller = PullEvents(source="PhoneDashboard",
274
+ keyword="Delays",
275
+ scratch = False,
276
+ test = False,
277
+ time_cols=["UpdatedDatetime", "EffectiveDatetime"],
278
+ raw_timezone = "Local",
279
+ appcode_col="App Code",
280
+ identifying_cols=["AppCode", "SnoozeDelay", "UpdatedDatetime"],
281
+ sort_cols = ["UpdatedDatetime"],
282
+ drop_cols= [],
283
+ cat_cols = [],
284
+ compress_type="txt")
285
+
286
+ raw_delayed_snooze = pd_snooze_delay_puller.update_data()
287
+ snooze_delay_cleaner = CleanEvents(source="PhoneDashboard", keyword="Delays")
288
+
289
+ clean_delays = snooze_delay_cleaner.clean_events(raw_event_df=raw_delayed_snooze,
290
+ date_col= "EffectiveDate",
291
+ cleaner= CleanEventsSnoozeDelays(),
292
+ phase_data=False)
293
+
294
+ clean_delays.to_csv(os.path.join("data","external", "intermediate", "PhoneDashboard", "Delays.csv"))
295
+
296
+
297
+
298
+ @staticmethod
299
+ def _build_pd_snooze():
300
+ """processes the snooze event data"""
301
+ pd_snooze_puller = PullEvents(source="PhoneDashboard",
302
+ keyword="Snooze",
303
+ scratch = False,
304
+ test = False,
305
+ time_cols=["Recorded", "Created"],
306
+ raw_timezone = "Local",
307
+ appcode_col="Source",
308
+ identifying_cols=["AppCode", "App", "Event", "Created"],
309
+ sort_cols = ["Created"],
310
+ drop_cols= [],
311
+ cat_cols = [],
312
+ compress_type="txt")
313
+
314
+ raw_snooze = pd_snooze_puller.update_data()
315
+
316
+ snooze_cleaner = CleanEvents(source="PhoneDashboard", keyword="Snooze")
317
+
318
+ pd_snooze = snooze_cleaner.clean_events(raw_event_df=raw_snooze,
319
+ date_col= "Date",
320
+ cleaner= CleanEventsSnooze(),
321
+ phase_data=False)
322
+
323
+ CleanEventsSnooze.get_premature_blocks(pd_snooze)
324
+
325
+ return pd_snooze
326
+
327
+ if __name__ == "__main__":
328
+ pd_snooze = Builder._build_pd_snooze_delay()
17/replication_package/code/data/source/build_master/cleaners/clean_events_alt.py ADDED
@@ -0,0 +1,150 @@
1
+ import os
2
+ import json
3
+ import git
4
+ import sys
5
+ import pandas as pd
6
+
7
+ #importing modules from root of data
8
+ root = git.Repo('.', search_parent_directories = True).working_tree_dir
9
+ sys.path.append(root)
10
+ os.chdir(os.path.join(root))
11
+
12
+ from lib.experiment_specs import study_config
13
+ from lib.data_helpers import data_utils
14
+
15
+ from lib.data_helpers.builder_utils import BuilderUtils
16
+ from lib.utilities import serialize
17
+
18
+ class CleanEventsAlt():
19
+
20
+ @staticmethod
21
+ def process_appcode_files(input_folder,output_file,cleaning_function):
22
+ """
23
+ inputs:
24
+ - input_folder: directory from which all pickle files will be read and appended
25
+ - output_file: the path where the output file will be saved
26
+ - cleaning_function: the function used to clean the aggregated data
27
+ """
28
+ appcodes = [x for x in os.listdir(input_folder) if ".pickle" in x]
29
+ df_list = []
30
+ print(appcodes[:5])
31
+ for appcode in appcodes:
32
+ path = os.path.join(input_folder, appcode)
33
+ try:
34
+ a_df = serialize.open_pickle(path)
35
+ if len(a_df) == 0:
36
+ continue
37
+ df_list.append(a_df)
38
+
39
+ #try:
40
+ # d = serialize.open_hdf(path.replace(".pickle",".h5"))
41
+ #except:
42
+ # print(f"could not open {appcode} h5 file!!!!, but pickle opened without problems")
43
+
44
+ except:
45
+ print(f"could not read {appcode} raw install pickle data")
46
+
47
+ if len(df_list) > 0:
48
+ df = pd.concat(df_list).reset_index(drop=True)
49
+ df = cleaning_function(df)
50
+
51
+ try:
52
+ serialize.save_hdf(df, output_file)
53
+ except:
54
+ print("Couldn't save hdf")
55
+
56
+ try:
57
+ df.to_csv(output_file + ".csv", index=False)
58
+ except:
59
+ print("couldn't save csv file")
60
+
61
+
62
+ @staticmethod
63
+ def clean_install(a_df):
64
+ # add a column indicating whether each app is a FITSBY app
65
+ a_df = data_utils.add_A_to_appcode(a_df, "AppCode")
66
+ duplicate_fitsby_apps = pd.read_excel(os.path.join("lib", "experiment_specs", "FITSBY_apps.xlsx"))
67
+ a_df = a_df.merge(duplicate_fitsby_apps, on='App', how='left')
68
+ a_df = a_df.drop_duplicates(subset = ["AppCode","App","Date"])
69
+ return a_df
70
+
71
+ ################
72
+ #####OLD#########
73
+ ###############
74
+ @staticmethod
75
+ def process_appcode_files_OLD(input_folder,output_file,cleaning_function):
76
+ appcodes = [x for x in os.listdir(input_folder) if ".pickle" in x]
77
+ df_list = []
78
+ for appcode in appcodes:
79
+ path = os.path.join(input_folder, appcode)
80
+ a_dict = serialize.open_pickle(path, df_bool=False)
81
+ if len(a_dict) == 0:
82
+ continue
83
+
84
+ a_df = cleaning_function(a_dict)
85
+ a_df["AppCode"] = "A" + appcode.replace(".pickle", "")
86
+ df_list.append(a_df)
87
+
88
+ if len(df_list)>0:
89
+ df = pd.concat(df_list).reset_index(drop = True)
90
+ try:
91
+ serialize.save_pickle(df, output_file)
92
+ except:
93
+ print("Couldn't save Pickle")
94
+
95
+ try:
96
+ # DONT PUT IN TRY BECAUSE IF BACKUP FAILS, WE WANT TO RE PROCESS THE NEW FILES
97
+ df.to_csv(output_file+".csv", index=False, compression='gzip')
98
+ except:
99
+ print("couldn't save zip file")
100
+
101
+ return df
102
+
103
+ else:
104
+ print("no alt data yet!")
105
+ return pd.DataFrame()
106
+
107
+ @staticmethod
108
+ def clean_alt_block_data(a_dict):
109
+ a_df = pd.DataFrame.from_dict(a_dict, orient='index').reset_index().rename(columns={"index": "Created", "app": "App"})
110
+ a_df = data_utils.clean_iso_dates(a_df, 'Created')
111
+ a_df["AltLimitMinutes"] = a_df["time_budget"] / (1000 * 60)
112
+ a_df["AltUseMinutesAtEvent"] = a_df["time_usage"] / (1000 * 60)
113
+ return a_df
114
+
115
+ @staticmethod
116
+ def clean_warnings(a_dict):
117
+ df = pd.DataFrame.from_dict(a_dict, orient='index').reset_index().rename(columns={"date": "Created"}).drop(
118
+ columns='index')
119
+ df = data_utils.clean_iso_dates(df, 'Created')
120
+
121
+ df['details_dict'] = df['details'].apply(lambda x: json.loads(x))
122
+ chars_df = df["details_dict"].apply(pd.Series)
123
+ assert len(chars_df) == len(df)
124
+ df = pd.concat([df, chars_df], axis=1)
125
+ df = df.drop(columns=["details_dict", "details"])
126
+
127
+ df = df.rename(columns = {"event": "Event",
128
+ "minutes-remaining": "MinutesRemaining",
129
+ "package": "App",
130
+ "snooze-delay": "SnoozeDelay",
131
+ "snooze-minutes": "SnoozeMinutes"})
132
+
133
+ event_rename = {"app-block-warning": "App Warning Displayed",
134
+ "app-blocked-can-snooze": "App Blocked - Snooze Offered",
135
+ "app-blocked-delayed": "App Blocked Until Delay Elapsed",
136
+ "app-blocked-no-snooze": "App Blocked - Snooze Unavailable",
137
+ "app-blocked-no-snooze-closed": "User Closed App Blocked (No Snooze) Warning",
138
+ "cancelled-snooze": "User Cancelled Snooze",
139
+ "closed-delay-warning": "User Closed Delay Warning",
140
+ "closed-warning": "User Closed Warning",
141
+ "skipped-snooze": "User Declined Snooze",
142
+ "snoozed-app-limit": "Snooze Enabled"}
143
+ df["Event"] = df["Event"].apply(lambda x: event_rename[x])
144
+ return df
145
+
146
+ if __name__ == "__main__":
147
+ input_folder = os.path.join("data","external","input","PhoneDashboard","RawAltInstall")
148
+ output_file = os.path.join("data", "external", "intermediate", "PhoneDashboard", "AltInstall")
149
+ CleanEventsAlt.process_appcode_files(input_folder,output_file,CleanEventsAlt.clean_install)
150
+ print('done')
17/replication_package/code/data/source/build_master/cleaners/clean_events_budget.py ADDED
@@ -0,0 +1,58 @@
1
+ import sys
2
+ import os
3
+ from datetime import datetime, timedelta
4
+ import numpy as np
5
+ import pandas as pd
6
+
7
+ from lib.experiment_specs import study_config
8
+ from lib.data_helpers import data_utils
9
+ from lib.utilities import codebook
10
+ from lib.utilities import serialize
11
+
12
+ from lib.data_helpers.builder_utils import BuilderUtils
13
+
14
+ """"
15
+ The new use cleaner, which will deprecate phone_use_cleaner, and phase_use
16
+ """
17
+ class CleanEventsBudget():
18
+
19
+ clean_file = os.path.join("data","external","intermediate","PhoneDashboard","CleanBudget")
20
+
21
+ def prep_clean(self, df):
22
+ # Update Use Data
23
+ df["SawLimitSettingPage"] = True
24
+
25
+ df.loc[df["App"] != "placeholder.app.does.not.exist","HasSetLimit"] = True
26
+ #df = df.loc[df["App"] != "placeholder.app.does.not.exist"]
27
+ df["EffectiveDate"] = df["EffectiveDate"].dt.date
28
+ df = df.loc[(df["EffectiveDate"] >= study_config.surveys["Midline"]["Start"].date())]
29
+
30
+ return df
31
+
32
+ """Called in the Event Cleaner, after the data has been subsetted to a given phase"""
33
+ def phase_clean(self, df, phase):
34
+ # no phase-specific cleaning is needed for the budget data
37
+ return df
38
+
39
+ @staticmethod
40
+ def get_latest_budget_data(clean_budget_df):
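+ """Collapse the budget event log to one row per user with limit-setting flags and the latest limit (in minutes) for each FITSBY app."""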
41
+ df = clean_budget_df[["AppCode", "HasSetLimit", "SawLimitSettingPage"]].groupby(
42
+ ["AppCode"]).first().reset_index()
43
+
44
+ apps = clean_budget_df.groupby(["AppCode", "App"])["NewLimit"].last().reset_index()
45
+ apps = apps.loc[apps["App"].isin(study_config.fitsby)]
46
+ apps["LimitMinutes"] = apps["NewLimit"] / (60 * 1000)
47
+ apps["App"] = apps["App"].apply(lambda x: x.capitalize())
48
+
49
+ apps_p = apps.pivot_table(index=["AppCode"],
50
+ values=["LimitMinutes"],
51
+ columns=["App"],
52
+ aggfunc='first')
53
+ apps_p.columns = [''.join(col[::1]).strip() for col in apps_p.columns.values]
54
+ apps_p = apps_p.reset_index()
55
+
56
+ df = df.merge(apps_p, on = "AppCode", how = "left")
57
+ return df
58
+
17/replication_package/code/data/source/build_master/cleaners/clean_events_pc.py ADDED
@@ -0,0 +1,60 @@
1
+ import sys
2
+ import os
3
+ from datetime import datetime, timedelta
4
+
5
+ from lib.data_helpers.builder_utils import BuilderUtils
6
+ from lib.utilities import serialize
+ from lib.experiment_specs import study_config  # used below for study_config.social_websites
7
+
8
+ class CleanEventsPC():
9
+
10
+ def __init__(self):
11
+ self.social_hosts = []
12
+ self.use_subsets = {
13
+ "WebDesktop": {
14
+ "Filters": {"WebBool":[True]},
15
+ "DenomCol": "DaysWithWeb",
16
+ "NumCols": ["UseMinutes"]},
17
+
18
+ "FBDesktop" :{
19
+ "Filters": {"FBBool": [True]},
20
+ "DenomCol": "DaysWithWeb",
21
+ "NumCols": ["UseMinutes"]},
22
+
23
+ "IGDesktop":{
24
+ "Filters": {"IGBool": [True]},
25
+ "DenomCol": "DaysWithWeb",
26
+ "NumCols": ["UseMinutes"]}
27
+ }
28
+
29
+
30
+ def prep_clean(self,df):
31
+ # Rename some things
32
+ df["UseMinutes"] = df["Duration"]/60
33
+ df = df.drop(columns = ["Duration"])
34
+
35
+ # create date variable
36
+ df["StartedOnIsoDate"] = df["StartedOnIso"].apply(lambda x: x.date())
37
+ df["EndedOnIsoDatetime"] = df["StartedOnIso"]+df["UseMinutes"].apply(lambda x: timedelta(seconds = x*60))
38
+
39
+ # label treatment websites
40
+ df.loc[df["Website"].notnull(), "WebBool"] = True
41
+ df.loc[df["Website"].fillna("nan").str.contains("facebook"), "FBBool"] = True
42
+ df.loc[df["Website"].fillna("nan").str.contains("instagram"), "IGBool"] = True
43
+
44
+ # Create List of hosts, ordered by popularity
45
+ top_hosts = df.groupby(['Website'])['AsOf'].agg(['count'])
46
+ top_hosts = top_hosts.rename(columns={'count': "WebsiteVisitCount"}).reset_index().sort_values(
47
+ by="WebsiteVisitCount", ascending=False)
48
+ top_hosts.to_csv(os.path.join("data","external","intermediate","PCDashboard", "TopSites.csv"),
49
+ index=False)
50
+
51
+ # get social hosts
52
+ self.social_hosts = [y for y in list(top_hosts["Website"]) if any(x in y for x in study_config.social_websites)]
53
+ return df
54
+
55
+ """Called in the Event Cleaner, after the data has been subsetted to a given phase"""
56
+ def phase_clean(self, df, phase):
57
+ df["WebDay"] = df["StartedOnIso"].apply(lambda x: x.date())
58
+ df.loc[:, "DaysWithWeb"] = df.groupby(by=['AppCode'])['WebDay'].transform(lambda x: x.nunique())
59
+ df = BuilderUtils.get_subsets_avg_use(df, self.use_subsets)
60
+ return df
17/replication_package/code/data/source/build_master/cleaners/clean_events_snooze.py ADDED
@@ -0,0 +1,43 @@
1
+ from lib.data_helpers.builder_utils import BuilderUtils
2
+ import os
3
+ from datetime import datetime,timedelta
4
+ from lib.experiment_specs import study_config
5
+
6
+ """"
7
+ The new use cleaner, which will deprecate phone_use_cleaner, and phase_use
8
+ """
9
+ class CleanEventsSnooze():
10
+
11
+ clean_file = os.path.join("data","external","intermediate","PhoneDashboard","CleanSnooze")
12
+
13
+ def prep_clean(self, df):
14
+ df["Date"] = df["Created"].dt.date
15
+ df = df.loc[(df["Date"] >= study_config.surveys["Midline"]["Start"].date())]
16
+ return df
17
+
18
+ @staticmethod
19
+ def get_premature_blocks(sn):
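+ """Flag user-day-app observations whose first event of the day is a block rather than a warning, and link them to the previous day's last displayed dialog (used to diagnose premature blocks)."""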
20
+ ud_s = sn.groupby(["AppCode", "Date", "App"]).first().reset_index()
21
+
22
+ bug = ud_s.loc[~ud_s["Event"].isin(["App Warning Displayed"])]
23
+ bug = bug.loc[bug["Created"] > datetime(2020, 5, 2, 0, 0), ["AppCode", "Created", "Date", "App", "Event",
24
+ "SnoozeExtension"]]
25
+ print(bug["Event"].value_counts())
26
+
27
+ # look for user-days for which last event was a display that the user didn't close
28
+ ud_l = sn.groupby(["AppCode", "Date", "App"]).last().reset_index()
29
+ b_p = ud_l.loc[ud_l["Event"].isin(["App Blocked - Snooze Offered",
30
+ "App Blocked - Snooze Unavailable",
31
+ "App Warning Displayed"])]
32
+ b_p["NextDate"] = b_p["Date"].apply(lambda x: x + timedelta(1))
33
+ b_p = b_p.rename(columns={"Event": "YesterdayEvent", "App": "YesterdayApp",
34
+ "Created": "YesterdayCreated",
35
+ "SnoozeExtension": "YesterdaySnoozeExtension"})
36
+
37
+ bug = bug.merge(b_p[["AppCode", "YesterdayApp", "NextDate", "YesterdayEvent", "YesterdayCreated",
38
+ "YesterdaySnoozeExtension"]],
39
+ right_on=["AppCode", "NextDate", "YesterdayApp"],
40
+ left_on=["AppCode", "Date", "App"],
41
+ how='left')
42
+
43
+ bug.to_csv(os.path.join("data","external","intermediate","Scratch","3b_SnoozeEvent.csv"))
17/replication_package/code/data/source/build_master/cleaners/clean_events_snooze_delays.py ADDED
@@ -0,0 +1,16 @@
1
+
2
+ class CleanEventsSnoozeDelays():
3
+
4
+ def __init__(self):
5
+ empty = []
6
+
7
+ def prep_clean(self,df):
8
+ # Rename some things
9
+ df['EffectiveDate'] = df['EffectiveDatetime'].apply(lambda x: x.date())
10
+ return df
11
+
12
+ """Called in the Event Cleaner, after the data has been subsetted to a given phase"""
13
+ def phase_clean(self, df, phase):
14
+ return df
15
+
16
+
17/replication_package/code/data/source/build_master/cleaners/clean_events_status.py ADDED
@@ -0,0 +1,59 @@
1
+ import os
2
+ import pandas as pd
3
+ from lib.data_helpers.builder_utils import BuilderUtils
4
+ from lib.utilities import serialize
5
+ from lib.experiment_specs import study_config
6
+ from lib.data_helpers import data_utils
7
+ from datetime import datetime, timedelta
8
+
9
+ class CleanEventsStatus():
10
+
11
+ def __init__(self):
12
+ self.social_hosts = []
13
+
14
+ def prep_clean(self,df):
15
+ # Rename some things
16
+ return df
17
+
18
+ """Called in the Event Cleaner, after the data has been subsetted to a given phase"""
19
+ def phase_clean(self, df, phase):
20
+ #df = df.sort_values(by = ["LastUpload"])
21
+ #df_l = df.groupby(["AppCode"]).last().reset_index()
22
+ return df
23
+
24
+ @staticmethod
25
+ def get_latest_pd_health(st, mr, uah):
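+ """Summarize each user's latest Phone Dashboard health from the status exports (st), the raw master (mr), and recent use data (uah)."""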
26
+ # OptedOut indicates whether the user opted out of limits; E-MailEnabled indicates whether the user
27
+ # is sent automatic emails about missing data
28
+
29
+ keep_cols = ["AppCode","PhoneModel","AppVersion","PlatformVersion","LastUpload","Server",
30
+ "OptedOut","E-MailEnabled"]
31
+ st_l = st.sort_values(["LastUpload"]).groupby(["AppCode"]).last().reset_index()
32
+ st_l["Server"] = st_l["Zipfile"].apply(lambda x: x.split("_")[1] if "_" in x else "nan")
33
+ st_l = st_l[keep_cols]
34
+
35
+ lt = uah.loc[uah["CreatedDate"] >= study_config.active_threshold].groupby(["AppCode"])["UseMinutes"].sum()
36
+ l = st_l.merge(lt, on = "AppCode", how = 'outer')
37
+
38
+
39
+ ############
40
+ #status indicates how pd data looks since study_config.active_threshold
41
+ ############
42
+ last_survey_complete = data_utils.get_last_survey()
43
+ code = study_config.surveys[last_survey_complete]["Code"]
44
+
45
+ #only code latest status for people that completed the last survey that ended
46
+ l = l.merge(mr.loc[mr[f"{code}_Complete"]=="Complete",["AppCode",f"{code}_Complete"]], on = "AppCode", how = 'right')
47
+
48
+ # has use data in past few days
49
+ l.loc[l["UseMinutes"]>0, "ActiveStatus"] = "Normal"
50
+
51
+ #i.e. no use data, but is status export
52
+ l.loc[l["ActiveStatus"].isnull(), "ActiveStatus"] = "NoUseDataLately"
53
+
54
+ #is also missing pd status data
55
+ l.loc[(l["ActiveStatus"]=="NoUseDataLately") & (l["PhoneModel"].isnull()),"ActiveStatus"]= "NoPDDataAtAll"
56
+
57
+ l = l[keep_cols+["ActiveStatus"]]
58
+
59
+ return l