anonymous-submission-acl2025 commited on
Commit
e9b22d2
·
1 Parent(s): a204f16
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. 30/paper.pdf +3 -0
  2. 30/replication_package/Adofiles/DCdensity_2009/DCdensity.ado +440 -0
  3. 30/replication_package/Adofiles/rd_2021/rdbwdensity.ado +342 -0
  4. 30/replication_package/Adofiles/rd_2021/rdbwdensity.sthlp +147 -0
  5. 30/replication_package/Adofiles/rd_2021/rdbwselect.ado +679 -0
  6. 30/replication_package/Adofiles/rd_2021/rdbwselect.sthlp +275 -0
  7. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014.ado +596 -0
  8. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014.sthlp +135 -0
  9. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014_cvplot.mo +0 -0
  10. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014_kconst.ado +885 -0
  11. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014_kweight.mo +0 -0
  12. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014_rdvce.mo +0 -0
  13. 30/replication_package/Adofiles/rd_2021/rdbwselect_2014_regconst.mo +0 -0
  14. 30/replication_package/Adofiles/rd_2021/rddensity.ado +1406 -0
  15. 30/replication_package/Adofiles/rd_2021/rddensity.sthlp +450 -0
  16. 30/replication_package/Adofiles/rd_2021/rddensity_fv.mo +0 -0
  17. 30/replication_package/Adofiles/rd_2021/rddensity_h.mo +0 -0
  18. 30/replication_package/Adofiles/rd_2021/rddensity_quantile.mo +0 -0
  19. 30/replication_package/Adofiles/rd_2021/rddensity_rep.mo +0 -0
  20. 30/replication_package/Adofiles/rd_2021/rddensity_unique.mo +0 -0
  21. 30/replication_package/Adofiles/rd_2021/rdplot.ado +796 -0
  22. 30/replication_package/Adofiles/rd_2021/rdplot.sthlp +222 -0
  23. 30/replication_package/Adofiles/rd_2021/rdrobust.ado +1009 -0
  24. 30/replication_package/Adofiles/rd_2021/rdrobust.sthlp +309 -0
  25. 30/replication_package/Adofiles/rd_2021/rdrobust_bw.mo +0 -0
  26. 30/replication_package/Adofiles/rd_2021/rdrobust_kweight.mo +0 -0
  27. 30/replication_package/Adofiles/rd_2021/rdrobust_res.mo +0 -0
  28. 30/replication_package/Adofiles/rd_2021/rdrobust_vce.mo +0 -0
  29. 30/replication_package/Adofiles/reghdfe_2019/reghdfe.ado +539 -0
  30. 30/replication_package/Adofiles/reghdfe_2019/reghdfe.mata +62 -0
  31. 30/replication_package/Adofiles/reghdfe_2019/reghdfe.sthlp +801 -0
  32. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_accelerations.mata +323 -0
  33. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_bipartite.mata +546 -0
  34. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_class.mata +1384 -0
  35. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_common.mata +838 -0
  36. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_constructor.mata +286 -0
  37. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_estat.ado +36 -0
  38. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_footnote.ado +60 -0
  39. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_header.ado +181 -0
  40. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_lsmr.mata +235 -0
  41. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_mata.sthlp +346 -0
  42. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_old.ado +0 -0
  43. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_old.sthlp +872 -0
  44. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_estat.ado +32 -0
  45. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_footnote.ado +113 -0
  46. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_p.ado +99 -0
  47. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_p.ado +78 -0
  48. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_parse.ado +139 -0
  49. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_projections.mata +166 -0
  50. 30/replication_package/Adofiles/reghdfe_2019/reghdfe_store_alphas.ado +29 -0
30/paper.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a4d6d6dfe479cf46d6d94f1f1c9f35333e47722eb11986151953e99e7aecd79
3
+ size 523522
30/replication_package/Adofiles/DCdensity_2009/DCdensity.ado ADDED
@@ -0,0 +1,440 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //Notes:
2
+ // This ado file was created by Brian Kovak, a Ph.D. student at the University
3
+ // of Michigan, under the direction of Justin McCrary. McCrary made some
4
+ // cosmetic alterations to the code, added some further error traps, and
5
+ // ran some simulations to ensure that
6
+ // there was no glitch in implementation. This file is not the basis for
7
+ // the estimates in McCrary (2008), however.
8
+
9
+ // The purpose of the file is to create a STATA command, -DCdensity-, which
10
+ // will allow for ready estimation of a discontinuous density function, as
11
+ // outlined in McCrary (2008), "Manipulation of the Running Variable in the
12
+ // Regression Discontinuity Design: A Density Test", Journal of Econometrics.
13
+
14
+ // The easiest way to use the file is to put it in your ado subdirectory. If
15
+ // you don't know where that is, try using -sysdir- at the Stata prompt.
16
+
17
+ // A feature of the program is that it is much faster than older STATA routines
18
+ // (e.g., -kdensity-). The source of the speed improvements is the use of
19
+ // MATA for both looping and for estimation of the regressions, and the lack of
20
+ // use of -preserve-.
21
+
22
+ // An example program showing how to use -DCdensity- is given in the file
23
+ // DCdensity_example.do
24
+
25
+ // JRM, 9/2008
26
+
27
+ // Update: Fixed bug that occurs when issuing something like
28
+ // DCdensity Z if female==1, breakpoint(0) generate(Xj Yj r0 fhat se_fhat) graphname(DCdensity_example.eps)
29
+
30
+ // Update 11.17.2009: Fixed bugs in XX matrix (see comments) and in hright (both code typos)
31
+
32
+
33
+ capture program drop DCdensity
34
+ program DCdensity, rclass
35
+ {
36
+ version 9.0
37
+ set more off
38
+ pause on
39
+ syntax varname(numeric) [if/] [in/], breakpoint(real) GENerate(string) ///
40
+ [ b(real 0) h(real 0) at(string) graphname(string) noGRaph]
41
+
42
+ marksample touse
43
+
44
+ //Advanced user switch
45
+ //0 - supress auxiliary output 1 - display aux output
46
+ local verbose 1
47
+
48
+ //Bookkeeping before calling MATA function
49
+ //"running variable" in terminology of McCrary (2008)
50
+ local R "`varlist'"
51
+
52
+ tokenize `generate'
53
+ local wc : word count `generate'
54
+ if (`wc'!=5) {
55
+ //generate(Xj Yj r0 fhat se_fhat) is suggested
56
+ di "Specify names for five variables in generate option"
57
+ di "1. Name of variable in which to store cell midpoints of histogram"
58
+ di "2. Name of variable in which to store cell heights of histogram"
59
+ di "3. Name of variable in which to store evaluation sequence for local linear regression loop"
60
+ di "4. Name of variable in which to store local linear density estimate"
61
+ di "5. Name of variable in which to store standard error of local linear density estimate"
62
+ error 198
63
+ }
64
+ else {
65
+ local cellmpname = "`1'"
66
+ local cellvalname = "`2'"
67
+ local evalname = "`3'"
68
+ local cellsmname = "`4'"
69
+ local cellsmsename = "`5'"
70
+ confirm new var `1'
71
+ confirm new var `2'
72
+ capture confirm new var `3'
73
+ if (_rc!=0 & "`at'"!="`3'") error 198
74
+ confirm new var `4'
75
+ confirm new var `5'
76
+ }
77
+
78
+ //If the user does not specify the evaluation sequence, this it is taken to be the histogram midpoints
79
+ if ("`at'" == "") {
80
+ local at = "`1'"
81
+ }
82
+
83
+ //Call MATA function
84
+ mata: DCdensitysub("`R'", "`touse'", `breakpoint', `b', `h', `verbose', "`cellmpname'", "`cellvalname'", ///
85
+ "`evalname'", "`cellsmname'", "`cellsmsename'", "`at'")
86
+
87
+ //Dump MATA return codes into STATA return codes
88
+ return scalar theta = r(theta)
89
+ return scalar se = r(se)
90
+ return scalar binsize = r(binsize)
91
+ return scalar bandwidth = r(bandwidth)
92
+
93
+ //if user wants the graph...
94
+ if ("`graph'"!="nograph") {
95
+ tempvar hi
96
+ quietly gen `hi' = `cellsmname' + 1.96*`cellsmsename'
97
+ tempvar lo
98
+ quietly gen `lo' = `cellsmname' - 1.96*`cellsmsename'
99
+ gr twoway (scatter `cellvalname' `cellmpname', msymbol(circle_hollow) mcolor(gray)) ///
100
+ (line `cellsmname' `evalname' if `evalname' < `breakpoint', lcolor(black) lwidth(medthick)) ///
101
+ (line `cellsmname' `evalname' if `evalname' > `breakpoint', lcolor(black) lwidth(medthick)) ///
102
+ (line `hi' `evalname' if `evalname' < `breakpoint', lcolor(black) lwidth(vthin)) ///
103
+ (line `lo' `evalname' if `evalname' < `breakpoint', lcolor(black) lwidth(vthin)) ///
104
+ (line `hi' `evalname' if `evalname' > `breakpoint', lcolor(black) lwidth(vthin)) ///
105
+ (line `lo' `evalname' if `evalname' > `breakpoint', lcolor(black) lwidth(vthin)), ///
106
+ xline(`breakpoint', lcolor(black)) ylabel(0(0.0005)0.001,labsize(huge)) xlabel(`breakpoint', labsize(huge)) graphregion(color(white)) legend(off)
107
+ if ("`graphname'"!="") {
108
+ di "Exporting graph as `graphname'"
109
+ graph export `graphname', replace
110
+ }
111
+ }
112
+ }
113
+ end
114
+
115
+
116
+ mata:
117
+ mata set matastrict on
118
+
119
+ void DCdensitysub(string scalar runvar, string scalar tousevar, real scalar c, real scalar b, ///
120
+ real scalar h, real scalar verbose, string scalar cellmpname, string scalar cellvalname, ///
121
+ string scalar evalname, string scalar cellsmname, string scalar cellsmsename, ///
122
+ string scalar atname) {
123
+ // inputs: runvar - name of stata running variable ("R" in McCrary (2008))
124
+ // tousevar - name of variable indicating which obs to use
125
+ // c - point of potential discontinuity
126
+ // b - bin size entered by user (zero if default is to be used)
127
+ // h - bandwidth entered by user (zero if default is to be used)
128
+ // verbose - flag for extra messages printing to screen
129
+ // cellmpname - name of new variable that will hold the histogram cell midpoints
130
+ // cellvalname - name of new variable that will hold the histogram values
131
+ // evalname - name of new variable that will hold locations where the histogram smoothing was
132
+ // evaluated
133
+ // cellsmname - name of new variable that will hold the smoothed histogram cell values
134
+ // cellsmsename - name of new variable that will hold standard errors for smoothed histogram cells
135
+ // atname - name of existing stata variable holding points at which to eval smoothed histogram
136
+
137
+ //declarations for general use and histogram generation
138
+ real colvector run // stata running variable
139
+ string scalar statacom // string to hold stata commands
140
+ real scalar errcode // scalar to hold return code for stata commands
141
+ real scalar rn, rsd, rmin, rmax, rp75, rp25, riqr // scalars for summary stats of running var
142
+ real scalar l, r // midpoint of lowest bin and highest bin in histogram
143
+ real scalar lc, rc // midpoint of bin just left of and just right of breakpoint
144
+ real scalar j // number of bins spanned by running var
145
+ real colvector binnum // each obs bin number
146
+ real colvector cellval // histogram cell values
147
+ real scalar i // counter
148
+ real scalar cellnum // cell value holder for histogram generation
149
+ real colvector cellmp // histogram cell midpoints
150
+
151
+ //Set up histogram grid
152
+
153
+ st_view(run, ., runvar, tousevar) //view of running variable--only observations for which `touse'=1
154
+
155
+ //Get summary stats on running variable
156
+ statacom = "quietly summarize " + runvar + " if " + tousevar + ", det"
157
+ errcode=_stata(statacom,1)
158
+ if (errcode!=0) {
159
+ "Unable to successfully execute the command "+statacom
160
+ "Check whether you have given Stata enough memory"
161
+ }
162
+ rn = st_numscalar("r(N)")
163
+ rsd = st_numscalar("r(sd)")
164
+ rmin = st_numscalar("r(min)")
165
+ rmax = st_numscalar("r(max)")
166
+ rp75 = st_numscalar("r(p75)")
167
+ rp25 = st_numscalar("r(p25)")
168
+ riqr = rp75 - rp25
169
+
170
+ if ( (c<=rmin) | (c>=rmax) ) {
171
+ printf("Breakpoint must lie strictly within range of running variable\n")
172
+ _error(3498)
173
+ }
174
+
175
+ //set bin size to default in paper sec. III.B unless provided by the user
176
+ if (b == 0) {
177
+ b = 2*rsd*rn^(-1/2)
178
+ if (verbose) printf("Using default bin size calculation, bin size = %f\n", b)
179
+ }
180
+
181
+ //bookkeeping
182
+ l = floor((rmin-c)/b)*b+b/2+c // midpoint of lowest bin in histogram
183
+ r = floor((rmax-c)/b)*b+b/2+c // midpoint of lowest bin in histogram
184
+ lc = c-(b/2) // midpoint of bin just left of breakpoint
185
+ rc = c+(b/2) // midpoint of bin just right of breakpoint
186
+ j = floor((rmax-rmin)/b)+2
187
+
188
+ //create bin numbers corresponding to run... See McCrary (2008, eq 2)
189
+ binnum = round((((floor((run :- c):/b):*b:+b:/2:+c) :- l):/b) :+ 1) // bin number for each obs
190
+
191
+ //generate histogram
192
+ cellval = J(j,1,0) // initialize cellval as j-vector of zeros
193
+ for (i = 1; i <= rn; i++) {
194
+ cellnum = binnum[i]
195
+ cellval[cellnum] = cellval[cellnum] + 1
196
+ }
197
+
198
+ cellval = cellval :/ rn // convert counts into fractions
199
+ cellval = cellval :/ b // normalize histogram to integrate to 1
200
+ cellmp = range(1,j,1) // initialize cellmp as vector of integers from 1 to j
201
+ cellmp = floor(((l :+ (cellmp:-1):*b):-c):/b):*b:+b:/2:+c // convert bin numbers into cell midpoints
202
+
203
+ //place histogram info into stata data set
204
+ real colvector stcellval // stata view for cell value variable
205
+ real colvector stcellmp // stata view for cell midpoint variable
206
+
207
+ (void) st_addvar("float", cellvalname)
208
+ st_view(stcellval, ., cellvalname)
209
+ (void) st_addvar("float", cellmpname)
210
+ st_view(stcellmp, ., cellmpname)
211
+ stcellval[|1\j|] = cellval
212
+ stcellmp[|1\j|] = cellmp
213
+
214
+ //Run 4th order global polynomial on histogram to get optimal bandwidth (if necessary)
215
+ real matrix P // projection matrix returned from orthpoly command
216
+ real matrix betaorth4 // coeffs from regression of orthogonal powers of cellmp
217
+ real matrix beta4 // coeffs from normal regression of powers of cellmp
218
+ real scalar mse4 // mean squared error from polynomial regression
219
+ real scalar hleft, hright // bandwidth est from polynomial left of and right of breakpoint
220
+ real scalar leftofc, rightofc // bin number just left of and just right of breakpoint
221
+ real colvector cellmpleft, cellmpright // cell midpoints left of and right of breakpoint
222
+ real colvector fppleft, fppright // fit second deriv of hist left of and right of breakpoint
223
+
224
+ //only calculate optimal bandwidth if user hasn't provided one
225
+ if (h == 0) {
226
+ //separate cells left of and right of the cutoff
227
+ leftofc = round((((floor((lc - c)/b)*b+b/2+c) - l)/b) + 1) // bin number just left of breakpoint
228
+ rightofc = round((((floor((rc - c)/b)*b+b/2+c) - l)/b) + 1) // bin number just right of breakpoint
229
+ if (rightofc-leftofc != 1) {
230
+ printf("Error occurred in optimal bandwidth calculation\n")
231
+ _error(3498)
232
+ }
233
+ cellmpleft = cellmp[|1\leftofc|]
234
+ cellmpright = cellmp[|rightofc\j|]
235
+
236
+ //estimate 4th order polynomial left of the cutoff
237
+ statacom = "orthpoly " + cellmpname + ", generate(" + cellmpname + "*) deg(4) poly(P)"
238
+ errcode=_stata(statacom,1)
239
+ if (errcode!=0) {
240
+ "Unable to successfully execute the command "+statacom
241
+ "Check whether you have given Stata enough memory"
242
+ }
243
+ P = st_matrix("P")
244
+ statacom = "reg " + cellvalname + " " + cellmpname + "1-" + cellmpname + "4 if " + cellmpname + " < " + strofreal(c)
245
+ errcode=_stata(statacom,1)
246
+ if (errcode!=0) {
247
+ "Unable to successfully execute the command "+statacom
248
+ "Check whether you have given Stata enough memory"
249
+ }
250
+ mse4 = st_numscalar("e(rmse)")^2
251
+ betaorth4 = st_matrix("e(b)")
252
+ beta4 = betaorth4 * P
253
+ fppleft = 2*beta4[2] :+ 6*beta4[3]:*cellmpleft + 12*beta4[4]:*cellmpleft:^2
254
+ hleft = 3.348 * ( mse4*(c-l) / sum( fppleft:^2) )^(1/5)
255
+
256
+ //estimate 4th order polynomial right of the cutoff
257
+ P = st_matrix("P")
258
+ statacom = "reg " + cellvalname + " " + cellmpname + "1-" + cellmpname + "4 if " + cellmpname + " > " + strofreal(c)
259
+ errcode=_stata(statacom,1)
260
+ if (errcode!=0) {
261
+ "Unable to successfully execute the command "+statacom
262
+ "Check whether you have given Stata enough memory"
263
+ }
264
+ mse4 = st_numscalar("e(rmse)")^2
265
+ betaorth4 = st_matrix("e(b)")
266
+ beta4 = betaorth4 * P
267
+ fppright = 2*beta4[2] :+ 6*beta4[3]:*cellmpright + 12*beta4[4]:*cellmpright:^2
268
+ hright = 3.348 * ( mse4*(r-c) / sum( fppright:^2) )^(1/5)
269
+ statacom = "drop " + cellmpname + "1-" + cellmpname + "4"
270
+ errcode=_stata(statacom,1)
271
+ if (errcode!=0) {
272
+ "Unable to successfully execute the command "+statacom
273
+ "Check whether you have given Stata enough memory"
274
+ }
275
+
276
+ //set bandwidth to average of calculations from left and right
277
+ h = 0.5*(hleft + hright)
278
+ if (verbose) printf("Using default bandwidth calculation, bandwidth = %f\n", h)
279
+ }
280
+
281
+ //Add padding zeros to histogram (to assist smoothing)
282
+ real scalar padzeros // number of zeros to pad on each side of hist
283
+ real scalar jp // number of histogram bins including padded zeros
284
+
285
+ padzeros = ceil(h/b) // number of zeros to pad on each side of hist
286
+ jp = j + 2*padzeros
287
+ if (padzeros >= 1) {
288
+ //add padding to histogram variables
289
+ cellval = ( J(padzeros,1,0) \ cellval \ J(padzeros,1,0) )
290
+ cellmp = ( range(l-padzeros*b,l-b,b) \ cellmp \ range(r+b,r+padzeros*b,b) )
291
+ //dump padded histogram variables out to stata
292
+ stcellval[|1\jp|] = cellval
293
+ stcellmp[|1\jp|] = cellmp
294
+ }
295
+
296
+ //Generate point estimate of discontinuity
297
+ real colvector dist // distance from a given observation
298
+ real colvector w // triangle kernel weights
299
+ real matrix XX, Xy // regression matrcies for weighted regression
300
+ real rowvector xmean, ymean // means for demeaning regression vars
301
+ real colvector beta // regression estimates from weighted reg.
302
+ real colvector ehat // predicted errors from weighted reg.
303
+ real scalar fhatr, fhatl // local linear reg. estimates at discontinuity
304
+ // estimated from right and left, respectively
305
+ real scalar thetahat // discontinuity estimate
306
+ real scalar sethetahat // standard error of discontinuity estimate
307
+
308
+ //Estimate left of discontinuity
309
+ dist = cellmp :- c // distance from potential discontinuity
310
+ w = rowmax( (J(jp,1,0), (1:-abs(dist:/h))) ):*(cellmp:<c) // triangle kernel weights for left
311
+ w = (w:/sum(w)) :* jp // normalize weights to sum to number of cells (as does stata aweights)
312
+ xmean = mean(dist, w)
313
+ ymean = mean(cellval, w)
314
+ XX = quadcrossdev(dist,xmean,w,dist,xmean) //fixed error on 11.17.2009
315
+ Xy = quadcrossdev(dist,xmean,w,cellval,ymean)
316
+ beta = invsym(XX)*Xy
317
+ beta = beta \ ymean-xmean*beta
318
+ fhatl = beta[2,1]
319
+
320
+ //Estimate right of discontinuity
321
+ w = rowmax( (J(jp,1,0), (1:-abs(dist:/h))) ):*(cellmp:>=c) // triangle kernel weights for right
322
+ w = (w:/sum(w)) :* jp // normalize weights to sum to number of cells (as does stata aweights)
323
+ xmean = mean(dist, w)
324
+ ymean = mean(cellval, w)
325
+ XX = quadcrossdev(dist,xmean,w,dist,xmean) //fixed error on 11.17.2009
326
+ Xy = quadcrossdev(dist,xmean,w,cellval,ymean)
327
+ beta = invsym(XX)*Xy
328
+ beta = beta \ ymean-xmean*beta
329
+ fhatr = beta[2,1]
330
+
331
+ //Calculate and display discontinuity estimate
332
+ thetahat = ln(fhatr) - ln(fhatl)
333
+ sethetahat = sqrt( (1/(rn*h)) * (24/5) * ((1/fhatr) + (1/fhatl)) )
334
+ printf("\nDiscontinuity estimate (log difference in height): %f\n", thetahat)
335
+ printf(" (%f)\n", sethetahat)
336
+
337
+ loopover=1 //This is an advanced user switch to get rid of LLR smoothing
338
+ //Can be used to speed up simulation runs--the switch avoids smoothing at
339
+ //eval points you aren't studying
340
+
341
+ //Perform local linear regression (LLR) smoothing
342
+ if (loopover==1) {
343
+ real scalar cellsm // smoothed histogram cell values
344
+ real colvector stcellsm // stata view for smoothed values
345
+ real colvector atstata // stata view for at variable (evaluation points)
346
+ real colvector at // points at which to evaluate LLR smoothing
347
+ real scalar evalpts // number of evaluation points
348
+ real colvector steval // stata view for LLR smothing eval points
349
+
350
+ // if evaluating at cell midpoints
351
+ if (atname == cellmpname) {
352
+ at = cellmp[|padzeros+1\padzeros+j|]
353
+ evalpts = j
354
+ }
355
+ else {
356
+ st_view(atstata, ., atname)
357
+ evalpts = nonmissing(atstata)
358
+ at = atstata[|1\evalpts|]
359
+ }
360
+
361
+ if (verbose) printf("Performing LLR smoothing.\n")
362
+ if (verbose) printf("%f iterations will be performed \n",j)
363
+
364
+ cellsm = J(evalpts,1,0) // initialize smoothed histogram cell values to zero
365
+ // loop over all evaluation points
366
+ for (i = 1; i <= evalpts; i++) {
367
+ dist = cellmp :- at[i]
368
+ //set weights relative to current bin - note comma below is row join operator, not two separate args
369
+ w = rowmax( (J(jp,1,0), ///
370
+ (1:-abs(dist:/h))):*((cellmp:>=c)*(at[i]>=c):+(cellmp:<c):*(at[i]<c)) )
371
+ //manually obtain weighted regression coefficients
372
+ w = (w:/sum(w)) :* jp // normalize weights to sum to N (as does stata aweights)
373
+ xmean = mean(dist, w)
374
+ ymean = mean(cellval, w)
375
+ XX = quadcrossdev(dist,xmean,w,dist,xmean) //fixed error on 11.17.2009
376
+ Xy = quadcrossdev(dist,xmean,w,cellval,ymean)
377
+ beta = invsym(XX)*Xy
378
+ beta = beta \ ymean-xmean*beta
379
+ cellsm[i] = beta[2,1]
380
+ //Show dots
381
+ if (verbose) {
382
+ if (mod(i,10) == 0) {
383
+ printf(".")
384
+ displayflush()
385
+ if (mod(i,500) == 0) {
386
+ printf(" %f LLR iterations\n",i)
387
+ displayflush()
388
+ }
389
+ }
390
+ }
391
+ }
392
+ printf("\n")
393
+
394
+ //set up stata variable to hold evaluation points for smoothed values
395
+ (void) st_addvar("float", evalname)
396
+ st_view(steval, ., evalname)
397
+ steval[|1\evalpts|] = at
398
+
399
+ //set up stata variable to hold smoothed values
400
+ (void) st_addvar("float", cellsmname)
401
+ st_view(stcellsm, ., cellsmname)
402
+ stcellsm[|1\evalpts|] = cellsm
403
+
404
+ //Calculate standard errors for LLR smoothed values
405
+ real scalar m // amount of kernel being truncated by breakpoint
406
+ real colvector cellsmse // standard errors of smoothed histogram
407
+ real colvector stcellsmse // stata view for cell midpoint variable
408
+ cellsmse = J(evalpts,1,0) // initialize standard errors to zero
409
+ for (i = 1; i <= evalpts; i++) {
410
+ if (at[i] > c) {
411
+ m = max((-1, (c-at[i])/h))
412
+ cellsmse[i] = ((12*cellsm[i])/(5*rn*h))* ///
413
+ (2-3*m^11-24*m^10-83*m^9-72*m^8+42*m^7+18*m^6-18*m^5+18*m^4-3*m^3+18*m^2-15*m)/ ///
414
+ (1+m^6+6*m^5-3*m^4-4*m^3+9*m^2-6*m)^2
415
+ cellsmse[i] = sqrt(cellsmse[i])
416
+ }
417
+ if (at[i] < c) {
418
+ m = min(((c-at[i])/h, 1))
419
+ cellsmse[i] = ((12*cellsm[i])/(5*rn*h))* ///
420
+ (2+3*m^11-24*m^10+83*m^9-72*m^8-42*m^7+18*m^6+18*m^5+18*m^4-3*m^3+18*m^2+15*m)/ ///
421
+ (1+m^6-6*m^5-3*m^4+4*m^3+9*m^2+6*m)^2
422
+ cellsmse[i] = sqrt(cellsmse[i])
423
+ }
424
+ }
425
+ //set up stata variable to hold standard errors for smoothed values
426
+ (void) st_addvar("float", cellsmsename)
427
+ st_view(stcellsmse, ., cellsmsename)
428
+ stcellsmse[|1\evalpts|] = cellsmse
429
+ }
430
+ //End of loop over evaluation points
431
+
432
+ //Fill in STATA return codes
433
+ st_rclear()
434
+ st_numscalar("r(theta)", thetahat)
435
+ st_numscalar("r(se)", sethetahat)
436
+ st_numscalar("r(binsize)", b)
437
+ st_numscalar("r(bandwidth)", h)
438
+ }
439
+ end
440
+
30/replication_package/Adofiles/rd_2021/rdbwdensity.ado ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ********************************************************************************
2
+ * RDDENSITY STATA PACKAGE -- rdbwdensity
3
+ * Authors: Matias D. Cattaneo, Michael Jansson, Xinwei Ma
4
+ ********************************************************************************
5
+ *!version 2.3 2021-02-28
6
+
7
+ capture program drop rdbwdensity
8
+
9
+ program define rdbwdensity, eclass
10
+ syntax varlist(max=1) [if] [in] [, ///
11
+ C(real 0) ///
12
+ P(integer 2) ///
13
+ KERnel(string) ///
14
+ FITselect(string) ///
15
+ VCE(string) ///
16
+ noREGularize ///
17
+ NLOCalmin (integer -1) ///
18
+ NUNIquemin (integer -1) ///
19
+ noMASSpoints ///
20
+ ]
21
+
22
+ marksample touse
23
+
24
+ if ("`kernel'"=="") local kernel = "triangular"
25
+ local kernel = lower("`kernel'")
26
+ if ("`fitselect'"=="") local fitselect = "unrestricted"
27
+ local fitselect = lower("`fitselect'")
28
+ if ("`vce'"=="") local vce = "jackknife"
29
+ local vce = lower("`vce'")
30
+
31
+ preserve
32
+ qui keep if `touse'
33
+
34
+ local x "`varlist'"
35
+
36
+ qui drop if `x'==.
37
+
38
+ qui su `x'
39
+ local x_min = r(min)
40
+ local x_max = r(max)
41
+ local N = r(N)
42
+
43
+ qui su `x' if `x'<`c'
44
+ local xl_min = r(min)
45
+ local xl_max = r(max)
46
+ local Nl = r(N)
47
+
48
+ qui su `x' if `x'>=`c'
49
+ local xr_min = r(min)
50
+ local xr_max = r(max)
51
+ local Nr = r(N)
52
+
53
+ ****************************************************************************
54
+ *** BEGIN ERROR HANDLING ***************************************************
55
+ if (`c'<=`x_min' | `c'>=`x_max'){
56
+ di "{err}{cmd:c()} should be set within the range of `x'."
57
+ exit 125
58
+ }
59
+
60
+ if (`Nl'<10 | `Nr'<10){
61
+ di "{err}Not enough observations to perform calculations."
62
+ exit 2001
63
+ }
64
+
65
+ if (`p'!=1 & `p'!=2 & `p'!=3 & `p'!=4 & `p'!=5 & `p'!=6 & `p'!=7){
66
+ di "{err}{cmd:p()} should be an integer value less or equal than 7."
67
+ exit 125
68
+ }
69
+
70
+ if ("`kernel'"!="uniform" & "`kernel'"!="triangular" & "`kernel'"!="epanechnikov"){
71
+ di "{err}{cmd:kernel()} incorrectly specified."
72
+ exit 7
73
+ }
74
+
75
+ if ("`fitselect'"!="restricted" & "`fitselect'"!="unrestricted"){
76
+ di "{err}{cmd:fitselect()} incorrectly specified."
77
+ exit 7
78
+ }
79
+
80
+ if ("`vce'"!="jackknife" & "`vce'"!="plugin"){
81
+ di "{err}{cmd:vce()} incorrectly specified."
82
+ exit 7
83
+ }
84
+
85
+ if ("`regularize'" == "") {
86
+ local regularize = 1
87
+ }
88
+ else {
89
+ local regularize = 0
90
+ }
91
+
92
+ if ("`masspoints'" == "") {
93
+ local masspoints = 1
94
+ }
95
+ else {
96
+ local masspoints = 0
97
+ }
98
+
99
+ if (`nlocalmin' < 0) {
100
+ local nlocalmin = 20 + `p' + 1
101
+ }
102
+
103
+ if (`nuniquemin' < 0) {
104
+ local nuniquemin = 20 + `p' + 1
105
+ }
106
+ *** END ERROR HANDLING *****************************************************
107
+ ****************************************************************************
108
+
109
+ qui replace `x' = `x'-`c'
110
+ qui sort `x'
111
+
112
+ ****************************************************************************
113
+ *** BEGIN MATA ESTIMATION **************************************************
114
+ mata{
115
+ *display("got here!")
116
+ X = st_data(.,("`x'"), 0);
117
+
118
+ XUnique = rddensity_unique(X)
119
+ freqUnique = XUnique[., 2]
120
+ indexUnique = XUnique[., 4]
121
+ XUnique = XUnique[., 1]
122
+ NUnique = length(XUnique)
123
+ NlUnique = sum(XUnique :< 0)
124
+ NrUnique = sum(XUnique :>= 0)
125
+
126
+ masspoints_flag = sum(freqUnique :!= 1) > 0 & `masspoints'
127
+ st_numscalar("masspoints_flag", masspoints_flag)
128
+
129
+ ****************************************************************************
130
+ ** Kernel Constants
131
+ ****************************************************************************
132
+ if ("`fitselect'"=="unrestricted") {
133
+ if ("`kernel'"=="uniform") {
134
+ Bsq_p=(0.24999999999999966693,0.01000000000000004878,0.00014172335600917503246,0.00000098418997230168060921,0.0000000039855627124297920874,0.000000000010481435883708594505,0.000000000000019251413808407223054,0.000000000000000026041096146069883723)
135
+ }
136
+ else if ("`kernel'"=="triangular") {
137
+ Bsq_p=(0.15999999999999992006,0.0051020408163267062795,0.00006298815822632821272,0.00000039855626977185269366,0.0000000015093255191922687787,0.0000000000037733140674455142929,0.0000000000000066614606382066531783,0.00000000000000000871923521295076001)
138
+ }
139
+ else if ("`kernel'"=="epanechnikov") {
140
+ Bsq_p=(0.17728531855955703689,0.0059878117913833833058,0.000076742107318398123782,0.00000049855793475223530487,0.0000000019253854002580922299,0.0000000000048868584327480008077,0.0000000000000087317551910484345913,0.000000000000000011557177676615075784)
141
+ }
142
+ }
143
+ else if ("`fitselect'"=="restricted") {
144
+ if ("`kernel'"=="uniform") {
145
+ Splus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.1666666667,0.25,0.125,0.1,0.08333333333,0.07142857143,0.0625,0.05555555556,0.05,0.04545454545,0.04166666667\0,0.25,0.5,0.1666666667,0.125,0.1,0.08333333333,0.07142857143,0.0625,0.05555555556,0.05,0.04545454545\0,0.125,0.1666666667,0.1,0.08333333333,0.07142857143,0.0625,0.05555555556,0.05,0.04545454545,0.04166666667,0.03846153846\0,0.1,0.125,0.08333333333,0.07142857143,0.0625,0.05555555556,0.05,0.04545454545,0.04166666667,0.03846153846,0.03571428571\0,0.08333333333,0.1,0.07142857143,0.0625,0.05555555556,0.05,0.04545454545,0.04166666667,0.03846153846,0.03571428571,0.03333333333\0,0.07142857143,0.08333333333,0.0625,0.05555555556,0.05,0.04545454545,0.04166666667,0.03846153846,0.03571428571,0.03333333333,0.03125\0,0.0625,0.07142857143,0.05555555556,0.05,0.04545454545,0.04166666667,0.03846153846,0.03571428571,0.03333333333,0.03125,0.02941176471\0,0.05555555556,0.0625,0.05,0.04545454545,0.04166666667,0.03846153846,0.03571428571,0.03333333333,0.03125,0.02941176471,0.02777777778\0,0.05,0.05555555556,0.04545454545,0.04166666667,0.03846153846,0.03571428571,0.03333333333,0.03125,0.02941176471,0.02777777778,0.02631578947\0,0.04545454545,0.05,0.04166666667,0.03846153846,0.03571428571,0.03333333333,0.03125,0.02941176471,0.02777777778,0.02631578947,0.025\0,0.04166666667,0.04545454545,0.03846153846,0.03571428571,0.03333333333,0.03125,0.02941176471,0.02777777778,0.02631578947,0.025,0.02380952381)
146
+ Gplus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.03333333333,0.05208333333,0.02430555556,0.01904761905,0.015625,0.01322751323,0.01145833333,0.0101010101,0.009027777778,0.008158508159,0.00744047619\0,0.05208333333,0.08333333333,0.0375,0.02916666667,0.02380952381,0.02008928571,0.01736111111,0.01527777778,0.01363636364,0.01231060606,0.01121794872\0,0.02430555556,0.0375,0.01785714286,0.0140625,0.01157407407,0.009821428571,0.008522727273,0.007523148148,0.006730769231,0.006087662338,0.005555555556\0,0.01904761905,0.02916666667,0.0140625,0.01111111111,0.009166666667,0.007792207792,0.006770833333,0.005982905983,0.005357142857,0.004848484848,0.004427083333\0,0.015625,0.02380952381,0.01157407407,0.009166666667,0.007575757576,0.006448412698,0.005608974359,0.00496031746,0.004444444444,0.004024621212,0.003676470588\0,0.01322751323,0.02008928571,0.009821428571,0.007792207792,0.006448412698,0.005494505495,0.004783163265,0.004232804233,0.003794642857,0.003437738732,0.003141534392\0,0.01145833333,0.01736111111,0.008522727273,0.006770833333,0.005608974359,0.004783163265,0.004166666667,0.003689236111,0.003308823529,0.002998737374,0.00274122807\0,0.0101010101,0.01527777778,0.007523148148,0.005982905983,0.00496031746,0.004232804233,0.003689236111,0.003267973856,0.002932098765,0.002658160553,0.002430555556\0,0.009027777778,0.01363636364,0.006730769231,0.005357142857,0.004444444444,0.003794642857,0.003308823529,0.002932098765,0.002631578947,0.002386363636,0.002182539683\0,0.008158508159,0.01231060606,0.006087662338,0.004848484848,0.004024621212,0.003437738732,0.002998737374,0.002658160553,0.002386363636,0.002164502165,0.001980027548\0,0.00744047619,0.01121794872,0.005555555556,0.004427083333,0.003676470588,0.003141534392,0.00274122807,0.002430555556,0.002182539683,0.001980027548,0.001811594203)
147
+ }
148
+ else if ("`kernel'"=="triangular") {
149
+ Splus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.08333333333,0.1666666667,0.05,0.03333333333,0.02380952381,0.01785714286,0.01388888889,0.01111111111,0.009090909091,0.007575757576,0.00641025641\0,0.1666666667,0.5,0.08333333333,0.05,0.03333333333,0.02380952381,0.01785714286,0.01388888889,0.01111111111,0.009090909091,0.007575757576\0,0.05,0.08333333333,0.03333333333,0.02380952381,0.01785714286,0.01388888889,0.01111111111,0.009090909091,0.007575757576,0.00641025641,0.005494505495\0,0.03333333333,0.05,0.02380952381,0.01785714286,0.01388888889,0.01111111111,0.009090909091,0.007575757576,0.00641025641,0.005494505495,0.004761904762\0,0.02380952381,0.03333333333,0.01785714286,0.01388888889,0.01111111111,0.009090909091,0.007575757576,0.00641025641,0.005494505495,0.004761904762,0.004166666667\0,0.01785714286,0.02380952381,0.01388888889,0.01111111111,0.009090909091,0.007575757576,0.00641025641,0.005494505495,0.004761904762,0.004166666667,0.003676470588\0,0.01388888889,0.01785714286,0.01111111111,0.009090909091,0.007575757576,0.00641025641,0.005494505495,0.004761904762,0.004166666667,0.003676470588,0.003267973856\0,0.01111111111,0.01388888889,0.009090909091,0.007575757576,0.00641025641,0.005494505495,0.004761904762,0.004166666667,0.003676470588,0.003267973856,0.002923976608\0,0.009090909091,0.01111111111,0.007575757576,0.00641025641,0.005494505495,0.004761904762,0.004166666667,0.003676470588,0.003267973856,0.002923976608,0.002631578947\0,0.007575757576,0.009090909091,0.00641025641,0.005494505495,0.004761904762,0.004166666667,0.003676470588,0.003267973856,0.002923976608,0.002631578947,0.002380952381\0,0.00641025641,0.007575757576,0.005494505495,0.004761904762,0.004166666667,0.003676470588,0.003267973856,0.002923976608,0.002631578947,0.002380952381,0.002164502165)
150
+ Gplus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.01031746032,0.02222222222,0.005853174603,0.003736772487,0.002579365079,0.001881914382,0.001430976431,0.001123413623,0.0009046509047,0.0007437007437,0.0006219474969\0,0.02222222222,0.05,0.0123015873,0.007738095238,0.005291005291,0.003835978836,0.002904040404,0.002272727273,0.001825951826,0.001498501499,0.001251526252\0,0.005853174603,0.0123015873,0.003373015873,0.002175925926,0.001512746513,0.001109307359,0.0008466070966,0.0006664631665,0.0005377955378,0.0004428210678,0.0003707893414\0,0.003736772487,0.007738095238,0.002175925926,0.001414141414,0.0009884559885,0.0007277444777,0.0005570818071,0.0004395604396,0.0003553391053,0.0002930035651,0.000245621753\0,0.002579365079,0.005291005291,0.001512746513,0.0009884559885,0.0006937506938,0.000512384441,0.0003931914646,0.0003108465608,0.0002516764281,0.0002077851343,0.0001743612425\0,0.001881914382,0.003835978836,0.001109307359,0.0007277444777,0.000512384441,0.0003793825222,0.0002917139078,0.0002309951758,0.0001872718784,0.000154780147,0.0001299991432\0,0.001430976431,0.002904040404,0.0008466070966,0.0005570818071,0.0003931914646,0.0002917139078,0.0002246732026,0.0001781499637,0.0001445917726,0.0001196172249,0.0001005451663\0,0.001123413623,0.002272727273,0.0006664631665,0.0004395604396,0.0003108465608,0.0002309951758,0.0001781499637,0.0001414210909,0.0001148916061,9.512417407e-05,8.001258001e-05\0,0.0009046509047,0.001825951826,0.0005377955378,0.0003553391053,0.0002516764281,0.0001872718784,0.0001445917726,0.0001148916061,9.341535657e-05,7.739735012e-05,6.514127067e-05\0,0.0007437007437,0.001498501499,0.0004428210678,0.0002930035651,0.0002077851343,0.000154780147,0.0001196172249,9.512417407e-05,7.739735012e-05,6.416508393e-05,5.403303328e-05\0,0.0006219474969,0.001251526252,0.0003707893414,0.000245621753,0.0001743612425,0.0001299991432,0.0001005451663,8.001258001e-05,6.514127067e-05,5.403303328e-05,4.552211074e-05)
151
+ }
152
+ else if ("`kernel'"=="epanechnikov") {
153
+ Splus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.1,0.1875,0.0625,0.04285714286,0.03125,0.02380952381,0.01875,0.01515151515,0.0125,0.01048951049,0.008928571429\0,0.1875,0.5,0.1,0.0625,0.04285714286,0.03125,0.02380952381,0.01875,0.01515151515,0.0125,0.01048951049\0,0.0625,0.1,0.04285714286,0.03125,0.02380952381,0.01875,0.01515151515,0.0125,0.01048951049,0.008928571429,0.007692307692\0,0.04285714286,0.0625,0.03125,0.02380952381,0.01875,0.01515151515,0.0125,0.01048951049,0.008928571429,0.007692307692,0.006696428571\0,0.03125,0.04285714286,0.02380952381,0.01875,0.01515151515,0.0125,0.01048951049,0.008928571429,0.007692307692,0.006696428571,0.005882352941\0,0.02380952381,0.03125,0.01875,0.01515151515,0.0125,0.01048951049,0.008928571429,0.007692307692,0.006696428571,0.005882352941,0.005208333333\0,0.01875,0.02380952381,0.01515151515,0.0125,0.01048951049,0.008928571429,0.007692307692,0.006696428571,0.005882352941,0.005208333333,0.004643962848\0,0.01515151515,0.01875,0.0125,0.01048951049,0.008928571429,0.007692307692,0.006696428571,0.005882352941,0.005208333333,0.004643962848,0.004166666667\0,0.0125,0.01515151515,0.01048951049,0.008928571429,0.007692307692,0.006696428571,0.005882352941,0.005208333333,0.004643962848,0.004166666667,0.003759398496\0,0.01048951049,0.0125,0.008928571429,0.007692307692,0.006696428571,0.005882352941,0.005208333333,0.004643962848,0.004166666667,0.003759398496,0.003409090909\0,0.008928571429,0.01048951049,0.007692307692,0.006696428571,0.005882352941,0.005208333333,0.004643962848,0.004166666667,0.003759398496,0.003409090909,0.003105590062)
154
+ Gplus=(0,0,0,0,0,0,0,0,0,0,0,0\0,0.01428571429,0.028515625,0.008515625,0.005627705628,0.003984375,0.002963702964,0.002287946429,0.001818181818,0.001478794643,0.001225832991,0.001032366071\0,0.028515625,0.05892857143,0.01666666667,0.01088169643,0.007643398268,0.005654761905,0.004348776224,0.00344629329,0.002797202797,0.002315067745,0.001947317388\0,0.008515625,0.01666666667,0.005140692641,0.003426339286,0.002440268065,0.001822916667,0.001411713287,0.001124526515,0.0009162895928,0.0007606325966,0.0006413091552\0,0.005627705628,0.01088169643,0.003426339286,0.002297702298,0.001643813776,0.001232101232,0.0009566326531,0.0007635501753,0.000623139881,0.0005179340783,0.0004371279762\0,0.003984375,0.007643398268,0.002440268065,0.001643813776,0.00118006993,0.0008868781888,0.0006900452489,0.0005516943994,0.0004508513932,0.0003751456876,0.000316903077\0,0.002963702964,0.005654761905,0.001822916667,0.001232101232,0.0008868781888,0.0006679594915,0.0005206118906,0.0004168174447,0.0003410218254,0.0002840296958,0.0002401244589\0,0.002287946429,0.004348776224,0.001411713287,0.0009566326531,0.0006900452489,0.0005206118906,0.0004063467492,0.0003257181187,0.0002667514374,0.0002223557692,0.0001881158642\0,0.001818181818,0.00344629329,0.001124526515,0.0007635501753,0.0005516943994,0.0004168174447,0.0003257181187,0.0002613485586,0.0002142160239,0.0001786923984,0.0001512691854\0,0.001478794643,0.002797202797,0.0009162895928,0.000623139881,0.0004508513932,0.0003410218254,0.0002667514374,0.0002142160239,0.0001757110167,0.0001466644151,0.0001242236025\0,0.001225832991,0.002315067745,0.0007606325966,0.0005179340783,0.0003751456876,0.0002840296958,0.0002223557692,0.0001786923984,0.0001466644151,0.0001224862094,0.0001037942608\0,0.001032366071,0.001947317388,0.0006413091552,0.0004371279762,0.000316903077,0.0002401244589,0.0001881158642,0.0001512691854,0.0001242236025,0.0001037942608,8.799171843e-05)
155
+ }
156
+ }
157
+ Psi=(0,-1,0,0,0,0,0,0,0,0,0,0\-1,0,0,0,0,0,0,0,0,0,0,0\0,0,1,0,0,0,0,0,0,0,0,0\0,0,0,1,0,0,0,0,0,0,0,0\0,0,0,0,-1,0,0,0,0,0,0,0\0,0,0,0,0,1,0,0,0,0,0,0\0,0,0,0,0,0,-1,0,0,0,0,0\0,0,0,0,0,0,0,1,0,0,0,0\0,0,0,0,0,0,0,0,-1,0,0,0\0,0,0,0,0,0,0,0,0,1,0,0\0,0,0,0,0,0,0,0,0,0,-1,0\0,0,0,0,0,0,0,0,0,0,0,1)
158
+
159
+ ****************************************************************************
160
+ ** Select preliminary bandwidths.
161
+ ****************************************************************************
162
+ mu = mean(X); sd = (variance(X))^(1/2)
163
+
164
+ fhatb = sd^(2*`p'+5) * normalden(-mu/sd) / (rddensity_h(-mu/sd,`p'+2) * normalden(-mu/sd))^2
165
+ C_b = (25884.444444494150957,3430865.4551236177795,845007948.04262602329,330631733667.03808594,187774809656037.3125,145729502641999264,146013502974449876992)
166
+ b = ((2*`p'+1)/4 * fhatb * C_b[`p']/`N')^(1/(2*`p'+5))
167
+
168
+ fhatc = sd^(2*`p'+1) * normalden(-mu/sd) / (rddensity_h(-mu/sd,`p') * normalden(-mu/sd))^2
169
+ C_c = (4.8000000000000246914,548.57142857155463389,100800.00000020420703,29558225.458100609481,12896196859.612621307,7890871468221.609375,6467911284037581)
170
+ c = (1/(2*`p') * fhatc * C_c[`p']/`N')^(1/(2*`p'+1))
171
+
172
+ // b is for higher-order derivative estimation
173
+ // c is for density estimation
174
+
175
+ if (`regularize') {
176
+
177
+ // bandwidth should not exceed the range of data
178
+ b = min( (b, max(abs(XUnique))) )
179
+ c = min( (c, max(abs(XUnique))) )
180
+
181
+ // nlocalmin check
182
+
183
+ if (`nlocalmin' > 0) {
184
+ b = max((b, sort(abs(X[selectindex(X :< 0)]), 1)[min((20+`p'+2+1, `Nl'))], (X[selectindex(X :>= 0)])[min((20+`p'+2+1, `Nr'))]))
185
+ c = max((c, sort(abs(X[selectindex(X :< 0)]), 1)[min((20+`p'+ 1, `Nl'))], (X[selectindex(X :>= 0)])[min((20+`p' +1, `Nr'))]))
186
+ }
187
+
188
+ // nuniquemin check
189
+ if (`nuniquemin' > 0) {
190
+ b = max((b, sort(abs(XUnique[selectindex(XUnique :< 0)]), 1)[min((20+`p'+2+1, NlUnique))], (XUnique[selectindex(XUnique :>= 0)])[min((20+`p'+2+1, NrUnique))]))
191
+ c = max((c, sort(abs(XUnique[selectindex(XUnique :< 0)]), 1)[min((20+`p' +1, NlUnique))], (XUnique[selectindex(XUnique :>= 0)])[min((20+`p' +1, NrUnique))]))
192
+ }
193
+ }
194
+
195
+ st_numscalar("BW_b", b)
196
+ st_numscalar("BW_c", c)
197
+
198
+ ****************************************************************************
199
+ ** Estimate main bandwidths.
200
+ ****************************************************************************
201
+ Xb = select(X, -b:<=X :& X:<=b)
202
+ Nlb = sum(-b:<=X :& X:<0)
203
+ Nrb = rows(Xb) - Nlb
204
+
205
+ Xc = select(X, -c:<=X :& X:<=c)
206
+ Nlc = sum(-c:<=X :& X:<0)
207
+ Nrc = rows(Xc) - Nlc
208
+
209
+ Ytemp = (0..(`N'-1))' :/ (`N'-1)
210
+ if (`masspoints') {
211
+ Ytemp = rddensity_rep(Ytemp[indexUnique], freqUnique)
212
+ }
213
+ Yb = select(Ytemp, -b:<=X :& X:<=b)
214
+ Yc = select(Ytemp, -c:<=X :& X:<=c)
215
+
216
+ h = J(4,3,0)
217
+
218
+ fV_b = rddensity_fv(Yb, Xb, `Nl', `Nr', Nlb, Nrb, b, b, `p'+2 , `p'+1, "`kernel'", "`fitselect'", "`vce'", `masspoints')
219
+ fV_c = rddensity_fv(Yc, Xc, `Nl', `Nr', Nlc, Nrc, c, c, `p' , 1 , "`kernel'", "`fitselect'", "`vce'", `masspoints')
220
+
221
+
222
+ h[.,2] = `N'*c*fV_c[.,2]
223
+
224
+ if ("`fitselect'"=="unrestricted") {
225
+ h[1,3] = fV_b[1,3] * Bsq_p[`p']^(1/2) * (-1)^`p' * factorial(`p'+1)
226
+ h[2,3] = fV_b[2,3] * Bsq_p[`p']^(1/2) * factorial(`p'+1)
227
+ }
228
+ else if ("`fitselect'"=="restricted") {
229
+ Psi = Psi[1..`p'+2,1..`p'+2];
230
+ Gplus = Gplus[1..`p'+2,1..`p'+2]; Gminus = Psi*Gplus*Psi;
231
+ vplus = Splus[1..`p'+2,`p'+3]; vminus = Psi*vplus;
232
+ Splus = Splus[1..`p'+2,1..`p'+2]; Sminus = Psi*Splus*Psi;
233
+ S = invsym(fV_c[2,1] * Splus + fV_c[1,1] * Sminus);
234
+ B = fV_b[1,3] * S[1..2,] * (fV_c[1,1] * (-1)^(`p'+1) * vminus + fV_c[2,1] * vplus)
235
+ h[1,3] = B[1,1]
236
+ h[2,3] = B[2,1]
237
+ }
238
+
239
+ h[3,3] = h[2,3] - h[1,3]; h[4,3] = h[2,3] + h[1,3]; h[.,3] = h[.,3]:^2;
240
+ h[.,1] = ((1/(2*`p')) * (h[.,2]:/h[.,3]) * (1/`N')):^(1/(2*`p'+1));
241
+
242
+ if (`regularize') {
243
+
244
+ for (i=1; i<=4; i++) {
245
+ if (h[i, 2] < 0) {
246
+ h[i, 1] = 0
247
+ h[i, 2] = .
248
+ }
249
+ if (h[i, 1] == .) {
250
+ h[i, 1] = 0
251
+ }
252
+ }
253
+
254
+ // bandwidth should not exceed the range of data
255
+ h[1,1] = min((h[1,1], abs(XUnique[1])))
256
+ h[2,1] = min((h[2,1], XUnique[NUnique]))
257
+ h[3,1] = min((h[3,1], max((abs(XUnique[1]), XUnique[NUnique]))))
258
+ h[4,1] = min((h[4,1], max((abs(XUnique[1]), XUnique[NUnique]))))
259
+
260
+ // nlocalmin check
261
+ if (`nlocalmin' > 0) {
262
+ hlMin = sort(abs(X[selectindex(X :< 0)]), 1)[min((`Nl', `nlocalmin'))]
263
+ hrMin = (X[selectindex(X :>= 0)])[min((`Nr', `nlocalmin'))]
264
+ h[1,1] = max((h[1,1], hlMin))
265
+ h[2,1] = max((h[2,1], hrMin))
266
+ h[3,1] = max((h[3,1], hlMin, hrMin))
267
+ h[4,1] = max((h[4,1], hlMin, hrMin))
268
+ }
269
+
270
+ // nuniquemin check
271
+ if (`nuniquemin' > 0) {
272
+ hlMin = sort(abs(XUnique[selectindex(XUnique :< 0)]),1)[min((NlUnique, `nuniquemin'))]
273
+ hrMin = (XUnique[selectindex(XUnique :>= 0)])[min((NrUnique, `nuniquemin'))]
274
+ h[1,1] = max((h[1,1], hlMin))
275
+ h[2,1] = max((h[2,1], hrMin))
276
+ h[3,1] = max((h[3,1], hlMin, hrMin))
277
+ h[4,1] = max((h[4,1], hlMin, hrMin))
278
+ }
279
+ }
280
+
281
+ st_matrix("h", h);
282
+
283
+ *display("Estimation completed.");
284
+ }
285
+ *** END MATA ESTIMATION ****************************************************
286
+ ****************************************************************************
287
+
288
+ ****************************************************************************
289
+ *** BEGIN OUTPUT TABLE *****************************************************
290
+ if (masspoints_flag == 1) {
291
+ disp ""
292
+ disp "Point estimates and standard errors have been adjusted for repeated observations."
293
+ disp "(Use option {it:nomasspoints} to suppress this adjustment.)"
294
+ }
295
+
296
+ disp ""
297
+ disp "Bandwidth selection for manipulation testing."
298
+
299
+ disp ""
300
+ disp in smcl in gr "Cutoff " in ye "c = " %10.3f `c' _col(22) " {c |} " _col(23) in gr "Left of " in ye "c" _col(36) in gr "Right of " in y "c" _col(58) in gr "Number of obs = " in ye %12.0f `N'
301
+ disp in smcl in gr "{hline 22}{c +}{hline 22}" _col(58) in gr "Model = " in ye "{ralign 12:`fitselect'}"
302
+ disp in smcl in gr "{ralign 21:Number of obs}" _col(22) " {c |} " _col(23) as result %9.0f `Nl' _col(37) as result %9.0f `Nr' _col(58) in gr "Kernel = " in ye "{ralign 12:`kernel'}"
303
+ disp in smcl in gr "{ralign 21:Min Running var.}" _col(22) " {c |} " _col(23) as result %9.3f `xl_min' _col(37) as result %9.3f `xr_min' _col(58) in gr "VCE method = " in ye "{ralign 12:`vce'}"
304
+ disp in smcl in gr "{ralign 21:Max Running var.}" _col(22) " {c |} " _col(23) as result %9.3f `xl_max' _col(37) as result %9.3f `xr_max'
305
+ disp in smcl in gr "{ralign 21:Order loc. poly. (p)}" _col(22) " {c |} " _col(23) as result %9.0f `p' _col(37) as result %9.0f `p'
306
+
307
+ disp ""
308
+ disp "Running variable: `x'."
309
+ disp in smcl in gr "{hline 22}{c TT}{hline 34}"
310
+ disp in smcl in gr "{ralign 21:Target}" _col(22) " {c |} " _col(23) in gr "Bandwidth" _col(37) " Variance" _col(49) " Bias^2"
311
+ disp in smcl in gr "{hline 22}{c +}{hline 34}"
312
+ disp in smcl in gr "{ralign 21:left density}" _col(22) " {c |} " _col(23) as result %9.3f h[1,1] _col(37) as result %9.3f h[1,2] _col(49) as result %9.3f h[1,3]
313
+ disp in smcl in gr "{ralign 21:right density}" _col(22) " {c |} " _col(23) as result %9.3f h[2,1] _col(37) as result %9.3f h[2,2] _col(49) as result %9.3f h[2,3]
314
+ disp in smcl in gr "{ralign 21:difference densities}" _col(22) " {c |} " _col(23) as result %9.3f h[3,1] _col(37) as result %9.3f h[3,2] _col(49) as result %9.3f h[3,3]
315
+ disp in smcl in gr "{ralign 21:sum densities}" _col(22) " {c |} " _col(23) as result %9.3f h[4,1] _col(37) as result %9.3f h[4,2] _col(49) as result %9.3f h[4,3]
316
+ disp in smcl in gr "{hline 22}{c BT}{hline 34}"
317
+ disp ""
318
+ *** END OUTPUT TABLE *******************************************************
319
+ ****************************************************************************
320
+
321
+ restore
322
+
323
+ ereturn clear
324
+ ereturn scalar c = `c'
325
+ ereturn scalar p = `p'
326
+ ereturn scalar N_l = `Nl'
327
+ ereturn scalar N_r = `Nr'
328
+ mat rown h = f_left f_right f_diff f_sum
329
+ mat coln h = bandwidth var bias2
330
+ ereturn matrix h = h
331
+ ereturn scalar BW_b = BW_b
332
+ ereturn scalar BW_c = BW_c
333
+
334
+ ereturn local runningvar "`x'"
335
+ ereturn local kernel = "`kernel'"
336
+ ereturn local fitmethod = "`fitselect'"
337
+ ereturn local vce = "`vce'"
338
+
339
+ mata: mata clear
340
+
341
+ end
342
+
30/replication_package/Adofiles/rd_2021/rdbwdensity.sthlp ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *!version 2.3 2021-02-28}{...}
3
+ {viewerjumpto "Syntax" "rdrobust##syntax"}{...}
4
+ {viewerjumpto "Description" "rdrobust##description"}{...}
5
+ {viewerjumpto "Options" "rdrobust##options"}{...}
6
+ {viewerjumpto "Examples" "rdrobust##examples"}{...}
7
+ {viewerjumpto "Saved results" "rdrobust##saved_results"}{...}
8
+
9
+ {title:Title}
10
+
11
+ {p 4 8}{cmd:rdbwdensity} {hline 2} Bandwidth Selection for Manipulation Testing Using Local Polynomial Density Estimation.{p_end}
12
+
13
+ {marker syntax}{...}
14
+ {title:Syntax}
15
+
16
+ {p 4 8}{cmd:rdbwdensity} {it:Var} {ifin}
17
+ [{cmd:,} {p_end}
18
+ {p 16 20}
19
+ {cmd:c(}{it:#}{cmd:)}
20
+ {cmd:p(}{it:#}{cmd:)}
21
+ {cmd:kernel(}{it:KernelFn}{cmd:)}
22
+ {cmd:fitselect(}{it:FitMethod}{cmd:)}
23
+ {cmd:vce(}{it:VceMethod}{cmd:)}
24
+ {cmd:nomasspoints}{p_end}
25
+ {p 16 20}
26
+ {cmd:nlocalmin(}{it:#}{cmd:)}
27
+ {cmd:nuniquemin(}{it:#}{cmd:)}
28
+ {cmd:noregularize}{p_end}
29
+ {p 16 20}]{p_end}
30
+
31
+ {marker description}{...}
32
+ {title:Description}
33
+
34
+ {p 4 8}{cmd:rdbwdensity} implements several data-driven bandwidth selection methods useful to construct manipulation testing procedures using the local polynomial density estimators proposed in
35
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2020_JASA.pdf":Cattaneo, Jansson and Ma (2020)}.{p_end}
36
+
37
+ {p 4 8}A detailed introduction to this Stata command is given in {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2018_Stata.pdf":Cattaneo, Jansson and Ma (2018)}.{p_end}
38
+ {p 8 8}Companion {browse "www.r-project.org":R} functions are also available {browse "https://rdpackages.github.io/rddensity":here}.{p_end}
39
+
40
+ {p 4 8}Companion function is {help rddensity:rddensity}.
41
+ See also the
42
+ {browse "https://nppackages.github.io/lpdensity":lpdensity}
43
+ package for other related bandwidth selection methods.{p_end}
44
+
45
+ {p 4 8}Related Stata and R packages useful for inference in regression discontinuity (RD) designs are described in the following website:{p_end}
46
+
47
+ {p 8 8}{browse "https://rdpackages.github.io/":https://rdpackages.github.io/}{p_end}
48
+
49
+ {marker options}{...}
50
+ {title:Options}
51
+
52
+ {dlgtab:Bandwidth Selection}
53
+
54
+ {p 4 8}{opt c:}{cmd:(}{it:#}{cmd:)} specifies the threshold or cutoff value in the support of {it:Var}, which determines the two samples (e.g., control and treatment units in RD settings).
55
+ Default is {cmd:c(0)}.{p_end}
56
+
57
+ {p 4 8}{opt p:}{cmd:(}{it:#}{cmd:)} specifies the local polynomial order used to construct the density estimators.
58
+ Default is {cmd:p(2)} (local quadratic approximation).{p_end}
59
+
60
+ {p 4 8}{opt fit:select}{cmd:(}{it:FitMethod}{cmd:)} specifies the density estimation method.{p_end}
61
+ {p 8 12}{opt unrestricted}{bind:} for density estimation without any restrictions (two-sample, unrestricted inference).
62
+ This is the default option.{p_end}
63
+ {p 8 12}{opt restricted}{bind:} for density estimation assuming equal distribution function and higher-order derivatives.{p_end}
64
+
65
+ {p 4 8}{opt ker:nel}{cmd:(}{it:KernelFn}{cmd:)} specifies the kernel function used to construct the local polynomial estimators.{p_end}
66
+ {p 8 12}{opt triangular}{bind: } {it:K(u) = (1 - |u|) * (|u|<=1)}.
67
+ This is the default option.{p_end}
68
+ {p 8 12}{opt epanechnikov}{bind:} {it:K(u) = 0.75 * (1 - u^2) * (|u|<=1)}.{p_end}
69
+ {p 8 12}{opt uniform}{bind: } {it:K(u) = 0.5 * (|u|<=1)}.{p_end}
70
+
71
+ {p 4 8}{opt vce:}{cmd:(}{it:VceMethod}{cmd:)} specifies the procedure used to compute the variance-covariance matrix estimator.{p_end}
72
+ {p 8 12}{opt plugin}{bind: } for asymptotic plug-in standard errors.{p_end}
73
+ {p 8 12}{opt jackknife}{bind:} for jackknife standard errors.
74
+ This is the default option.{p_end}
75
+
76
+ {p 4 8}{opt nomass:points} will not adjust for mass points in the data.{p_end}
77
+
78
+ {dlgtab:Local Sample Size Checking}
79
+
80
+ {p 4 8}{opt nloc:almin}{cmd:(}{it:#}{cmd:)} specifies the minimum number of observations in each local neighborhood.
81
+ This option will be ignored if set to 0, or if {cmd:noregularize} is used.
82
+ The default value is {cmd:20+p(}{it:#}{cmd:)+1}.{p_end}
83
+
84
+ {p 4 8}{opt nuni:quemin}{cmd:(}{it:#}{cmd:)} specifies the minimum number of unique observations in each local neighborhood.
85
+ This option will be ignored if set to 0, or if {cmd:noregularize} is used.
86
+ The default value is {cmd:20+p(}{it:#}{cmd:)+1}.{p_end}
87
+
88
+ {p 4 8}{opt noreg:ularize} suppresses the local sample size checking feature.{p_end}
89
+
90
+
91
+ {marker examples}{...}
92
+ {title:Example: Cattaneo, Frandsen and Titiunik (2015) Incumbency Data}.
93
+
94
+ {p 4 8}Load dataset (cutoff is 0 in this dataset):{p_end}
95
+ {p 8 8}{cmd:. use rddensity_senate.dta}{p_end}
96
+
97
+ {p 4 8}Bandwidth selection for manipulation test using default options: {p_end}
98
+ {p 8 8}{cmd:. rdbwdensity margin}{p_end}
99
+
100
+ {p 4 8}Bandwidth selection for manipulation test using plug-in standard errors:{p_end}
101
+ {p 8 8}{cmd:. rdbwdensity margin, vce(plugin)}{p_end}
102
+
103
+
104
+ {marker saved_results}{...}
105
+ {title:Saved results}
106
+
107
+ {p 4 8}{cmd:rdbwdensity} saves the following in {cmd:e()}:
108
+
109
+ {synoptset 20 tabbed}{...}
110
+ {p2col 5 20 24 2: Scalars, matrices and macros}{p_end}
111
+ {synopt:{cmd:e(c)}}cutoff value{p_end}
112
+ {synopt:{cmd:e(p)}}order of the polynomial used for density estimation{p_end}
113
+ {synopt:{cmd:e(N_l)}}sample size to the left of the cutoff{p_end}
114
+ {synopt:{cmd:e(N_r)}}sample size to the right of the cutoff{p_end}
115
+ {synopt:{cmd:e(h)}}matrix of estimated bandwidths (including underlying estimated constants){p_end}
116
+ {synopt:{cmd:e(runningvar)}}running variable used{p_end}
117
+ {synopt:{cmd:e(kernel)}}kernel used{p_end}
118
+ {synopt:{cmd:e(fitmethod)}}model used{p_end}
119
+ {synopt:{cmd:e(vce)}}standard errors estimator used{p_end}
120
+
121
+
122
+ {title:References}
123
+
124
+ {p 4 8}Cattaneo, M. D., B. Frandsen, and R. Titiunik. 2015.
125
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Randomization Inference in the Regression Discontinuity Design: An Application to the Study of Party Advantages in the U.S. Senate}.{p_end}
126
+ {p 8 8}{it:Journal of Causal Inference} 3(1): 1-24.{p_end}
127
+
128
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2018.
129
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2018_Stata.pdf": Manipulation Testing based on Density Discontinuity}.{p_end}
130
+ {p 8 8}{it:Stata Journal} 18(1): 234-261.{p_end}
131
+
132
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2020.
133
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2020_JASA.pdf":Simple Local Polynomial Density Estimators}.{p_end}
134
+ {p 8 8}{it:Journal of the American Statistical Association} 115(531): 1449-1455.{p_end}
135
+
136
+ {title:Authors}
137
+
138
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
139
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
140
+
141
+ {p 4 8}Michael Jansson, University of California Berkeley, Berkeley, CA.
142
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
143
+
144
+ {p 4 8}Xinwei Ma, University of California San Diego, La Jolla, CA.
145
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
146
+
147
+
30/replication_package/Adofiles/rd_2021/rdbwselect.ado ADDED
@@ -0,0 +1,679 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *!version 8.1.0 2021-02-22
2
+
3
+ capture program drop rdbwselect
4
+ program define rdbwselect, eclass
5
+ syntax anything [if] [in] [, c(real 0) fuzzy(string) deriv(real 0) p(real 1) q(real 0) covs(string) covs_drop(string) kernel(string) weights(string) bwselect(string) vce(string) scaleregul(real 1) all nochecks masspoints(string) bwcheck(real 0) bwrestrict(string) stdvars(string)]
6
+
7
+ marksample touse
8
+ preserve
9
+ qui keep if `touse'
10
+ tokenize "`anything'"
11
+ local y `1'
12
+ local x `2'
13
+ local kernel = lower("`kernel'")
14
+ local bwselect = lower("`bwselect'")
15
+
16
+ ******************** Set VCE ***************************
17
+ local nnmatch = 3
18
+ tokenize `vce'
19
+ local w : word count `vce'
20
+ if `w' == 1 {
21
+ local vce_select `"`1'"'
22
+ }
23
+ if `w' == 2 {
24
+ local vce_select `"`1'"'
25
+ if ("`vce_select'"=="nn") local nnmatch `"`2'"'
26
+ if ("`vce_select'"=="cluster" | "`vce_select'"=="nncluster") local clustvar `"`2'"'
27
+ }
28
+ if `w' == 3 {
29
+ local vce_select `"`1'"'
30
+ local clustvar `"`2'"'
31
+ local nnmatch `"`3'"'
32
+ if ("`vce_select'"!="cluster" & "`vce_select'"!="nncluster") di as error "{err}{cmd:vce()} incorrectly specified"
33
+ }
34
+ if `w' > 3 {
35
+ di as error "{err}{cmd:vce()} incorrectly specified"
36
+ exit 125
37
+ }
38
+
39
+ local vce_type = "NN"
40
+ if ("`vce_select'"=="hc0") local vce_type = "HC0"
41
+ if ("`vce_select'"=="hc1") local vce_type = "HC1"
42
+ if ("`vce_select'"=="hc2") local vce_type = "HC2"
43
+ if ("`vce_select'"=="hc3") local vce_type = "HC3"
44
+ if ("`vce_select'"=="cluster") local vce_type = "Cluster"
45
+ if ("`vce_select'"=="nncluster") local vce_type = "NNcluster"
46
+
47
+ if ("`vce_select'"=="cluster" | "`vce_select'"=="nncluster") local cluster = "cluster"
48
+ if ("`vce_select'"=="cluster") local vce_select = "hc0"
49
+ if ("`vce_select'"=="nncluster") local vce_select = "nn"
50
+ if ("`vce_select'"=="") local vce_select = "nn"
51
+
52
+ ******************** Set Fuzzy***************************
53
+ tokenize `fuzzy'
54
+ local w : word count `fuzzy'
55
+ if `w' == 1 {
56
+ local fuzzyvar `"`1'"'
57
+ }
58
+ if `w' == 2 {
59
+ local fuzzyvar `"`1'"'
60
+ local sharpbw `"`2'"'
61
+ if `"`2'"' != "sharpbw" {
62
+ di as error "{err}fuzzy() only accepts sharpbw as a second input"
63
+ exit 125
64
+ }
65
+ }
66
+ if `w' >= 3 {
67
+ di as error "{err}{cmd:fuzzy()} only accepts two inputs"
68
+ exit 125
69
+ }
70
+ ************************************************************
71
+
72
+ **** DROP MISSINGS ******************************************
73
+ qui drop if `y'==. | `x'==.
74
+ if ("`cluster'"!="") qui drop if `clustvar'==.
75
+ if ("`fuzzy'"~="") {
76
+ qui drop if `fuzzyvar'==.
77
+ qui su `fuzzyvar'
78
+ *qui replace `fuzzyvar' = `fuzzyvar'/r(sd)
79
+ }
80
+
81
+ if ("`covs'"~="") {
82
+ qui ds `covs', alpha
83
+ local covs_list = r(varlist)
84
+ local ncovs: word count `covs_list'
85
+ foreach z in `covs_list' {
86
+ qui drop if `z'==.
87
+ }
88
+ }
89
+
90
+
91
+ **** CHECK colinearity ******************************************
92
+ local covs_drop_coll = 0
93
+ if ("`covs_drop'"=="") local covs_drop = "pinv"
94
+ if ("`covs'"~="") {
95
+
96
+ if ("`covs_drop'"=="invsym") local covs_drop_coll = 1
97
+ if ("`covs_drop'"=="pinv") local covs_drop_coll = 2
98
+
99
+ qui _rmcoll `covs_list'
100
+ local nocoll_controls_cat `r(varlist)'
101
+ local nocoll_controls ""
102
+ foreach myString of local nocoll_controls_cat {
103
+ if ~strpos("`myString'", "o."){
104
+ if ~strpos("`myString'", "MYRUNVAR"){
105
+ local nocoll_controls "`nocoll_controls' `myString'"
106
+ }
107
+ }
108
+ }
109
+ local covs_new `nocoll_controls'
110
+ qui ds `covs_new', alpha
111
+ local covs_list_new = r(varlist)
112
+ local ncovs_new: word count `covs_list_new'
113
+
114
+ if (`ncovs_new'<`ncovs') {
115
+ if ("`covs_drop'"=="off") {
116
+ di as error "{err}Multicollinearity issue detected in {cmd:covs}. Please rescale and/or remove redundant covariates, or add {cmd:covs_drop} option."
117
+ exit 125
118
+ }
119
+ else {
120
+ local ncovs = "`ncovs_new'"
121
+ local covs_list = "`covs_list_new'"
122
+ *local covs_drop_coll = 1
123
+ }
124
+ }
125
+ }
126
+
127
+
128
+
129
+
130
+
131
+ **** DEFAULTS ***************************************
132
+ if ("`masspoints'"=="") local masspoints = "adjust"
133
+ if ("`stdvars'"=="") local stdvars = "off"
134
+ if ("`bwrestrict'"=="") local bwrestrict = "on"
135
+ *****************************************************************
136
+
137
+ qui su `x', d
138
+ local x_min = r(min)
139
+ local x_max = r(max)
140
+ local N = r(N)
141
+ local x_iq = r(p75)-r(p25)
142
+ local x_sd = r(sd)
143
+
144
+ if ("`deriv'">"0" & "`p'"=="1" & "`q'"=="0") local p = (`deriv'+1)
145
+ if ("`q'"=="0") local q = (`p'+1)
146
+
147
+ **************************** BEGIN ERROR CHECKING ************************************************
148
+ if ("`nochecks'"=="") {
149
+
150
+ if (`c'<=`x_min' | `c'>=`x_max'){
151
+ di as error "{err}{cmd:c()} should be set within the range of `x'"
152
+ exit 125
153
+ }
154
+
155
+ if (`N'<20){
156
+ di as error "{err}Not enough observations to perform bandwidth calculations"
157
+ exit 2001
158
+ }
159
+
160
+ if ("`kernel'"~="uni" & "`kernel'"~="uniform" & "`kernel'"~="tri" & "`kernel'"~="triangular" & "`kernel'"~="epa" & "`kernel'"~="epanechnikov" & "`kernel'"~="" ){
161
+ di as error "{err}{cmd:kernel()} incorrectly specified"
162
+ exit 7
163
+ }
164
+
165
+ if ("`bwselect'"=="CCT" | "`bwselect'"=="IK" | "`bwselect'"=="CV" |"`bwselect'"=="cct" | "`bwselect'"=="ik" | "`bwselect'"=="cv"){
166
+ di as error "{err}{cmd:bwselect()} options IK, CCT and CV have been depricated. Please see help for new options"
167
+ exit 7
168
+ }
169
+
170
+ if ("`bwselect'"!="mserd" & "`bwselect'"!="msetwo" & "`bwselect'"!="msesum" & "`bwselect'"!="msecomb1" & "`bwselect'"!="msecomb2" & "`bwselect'"!="cerrd" & "`bwselect'"!="certwo" & "`bwselect'"!="cersum" & "`bwselect'"!="cercomb1" & "`bwselect'"!="cercomb2" & "`bwselect'"~=""){
171
+ di as error "{err}{cmd:bwselect()} incorrectly specified"
172
+ exit 7
173
+ }
174
+
175
+ if ("`vce_select'"~="nn" & "`vce_select'"~="" & "`vce_select'"~="cluster" & "`vce_select'"~="nncluster" & "`vce_select'"~="hc1" & "`vce_select'"~="hc2" & "`vce_select'"~="hc3" & "`vce_select'"~="hc0"){
176
+ di as error "{err}{cmd:vce()} incorrectly specified"
177
+ exit 7
178
+ }
179
+
180
+ if ("`p'"<"0" | "`q'"<="0" | "`deriv'"<"0" | "`nnmatch'"<="0" ){
181
+ di as error "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()}, {cmd:nnmatch()} imson should be positive"
182
+ exit 411
183
+ }
184
+
185
+ if ("`p'">="`q'" & "`q'">"0"){
186
+ di as error "{err}{cmd:q()} should be higher than {cmd:p()}"
187
+ exit 125
188
+ }
189
+
190
+ if ("`deriv'">"`p'" & "`deriv'">"0" ){
191
+ di as error "{err}{cmd:deriv()} can not be higher than {cmd:p()}"
192
+ exit 125
193
+ }
194
+
195
+ if ("`p'">"0" ) {
196
+ local p_round = round(`p')/`p'
197
+ local q_round = round(`q')/`q'
198
+ local d_round = round(`deriv'+1)/(`deriv'+1)
199
+ local m_round = round(`nnmatch')/`nnmatch'
200
+
201
+ if (`p_round'!=1 | `q_round'!=1 |`d_round'!=1 |`m_round'!=1 ){
202
+ di as error "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()} and {cmd:nnmatch()} should be integers"
203
+ exit 126
204
+ }
205
+ }
206
+ }
207
+
208
+ if ("`kernel'"=="epanechnikov" | "`kernel'"=="epa") {
209
+ local kernel_type = "Epanechnikov"
210
+ local C_c = 2.34
211
+ }
212
+ else if ("`kernel'"=="uniform" | "`kernel'"=="uni") {
213
+ local kernel_type = "Uniform"
214
+ local C_c = 1.843
215
+ }
216
+ else {
217
+ local kernel_type = "Triangular"
218
+ local C_c = 2.576
219
+ }
220
+
221
+ if ("`vce_select'"=="nn" | "`masspoints'"=="check" | "`masspoints'"=="adjust") {
222
+ sort `x', stable
223
+ if ("`vce_select'"=="nn") {
224
+ tempvar dups dupsid
225
+ by `x': gen dups = _N
226
+ by `x': gen dupsid = _n
227
+ }
228
+ }
229
+
230
+
231
+ mata{
232
+ c = `c'
233
+ p = `p'
234
+ q = `q'
235
+ covs_drop_coll = `covs_drop_coll'
236
+ nnmatch = strtoreal("`nnmatch'")
237
+
238
+ Y = st_data(.,("`y'"), 0); X = st_data(.,("`x'"), 0)
239
+
240
+ BWp = min((`x_sd',`x_iq'/1.349))
241
+ x_sd = y_sd = 1
242
+ if ("`stdvars'"=="on") {
243
+ y_sd = sqrt(variance(Y))
244
+ x_sd = sqrt(variance(X))
245
+ Y = Y/y_sd
246
+ X = X/x_sd
247
+ c = c/x_sd
248
+ BWp = min((1, (`x_iq'/x_sd)/1.349))
249
+ }
250
+
251
+ ind_r = X:>=c
252
+ ind_l = abs(1:-ind_r)
253
+
254
+ X_l = select(X,ind_l); X_r = select(X,ind_r)
255
+ Y_l = select(Y,ind_l); Y_r = select(Y,ind_r)
256
+
257
+ N = length(X); N_l = length(X_l); N_r = length(X_r)
258
+
259
+ x_l_min = min(X_l); x_l_max = max(X_l)
260
+ x_r_min = min(X_r); x_r_max = max(X_r)
261
+
262
+ range_l = c - x_l_min
263
+ range_r = x_r_max - c
264
+
265
+ dZ=Z_l=Z_r=T_l=T_r=Cind_l=Cind_r=g_l=g_r=dups_l=dups_r=dupsid_l=dupsid_r=0
266
+
267
+ if ("`vce_select'"=="nn") {
268
+ dups = st_data(.,("dups"), 0); dupsid = st_data(.,("dupsid"), 0)
269
+ dups_l = select(dups,ind_l); dups_r = select(dups,ind_r)
270
+ dupsid_l = select(dupsid,ind_l); dupsid_r = select(dupsid,ind_r)
271
+ }
272
+
273
+ if ("`covs'"~="") {
274
+ Z = st_data(.,tokens("`covs_list'"), 0)
275
+ dZ = cols(Z)
276
+ Z_l = select(Z,ind_l); Z_r = select(Z,ind_r)
277
+ }
278
+
279
+ if ("`fuzzy'"~="") {
280
+ T = st_data(.,("`fuzzyvar'"), 0)
281
+ T_l = select(T,ind_l); T_r = select(T,ind_r)
282
+ if (variance(T_l)==0 | variance(T_r)==0){
283
+ T_l = T_r =0
284
+ st_local("perf_comp","perf_comp")
285
+ }
286
+ if ("`sharpbw'"!=""){
287
+ T_l = T_r =0
288
+ st_local("sharpbw","sharpbw")
289
+ }
290
+ }
291
+
292
+ C_l=C_r=0
293
+ if ("`cluster'"!="") {
294
+ C = st_data(.,("`clustvar'"), 0)
295
+ C_l = select(C,ind_l); C_r = select(C,ind_r)
296
+ indC_l = order(C_l,1); indC_r = order(C_r,1)
297
+ g_l = rows(panelsetup(C_l[indC_l],1)); g_r = rows(panelsetup(C_r[indC_r],1))
298
+ st_numscalar("g_l", g_l); st_numscalar("g_r", g_r)
299
+ }
300
+
301
+ fw_l = fw_r = 0
302
+ if ("`weights'"~="") {
303
+ fw = st_data(.,("`weights'"), 0)
304
+ fw_l = select(fw,ind_l); fw_r = select(fw,ind_r)
305
+ }
306
+
307
+ mN = N
308
+ bwcheck = `bwcheck'
309
+ masspoints_found = 0
310
+ if ("`masspoints'"=="check" | "`masspoints'"=="adjust") {
311
+ X_uniq_l = sort(uniqrows(X_l),-1)
312
+ X_uniq_r = uniqrows(X_r)
313
+ M_l = length(X_uniq_l)
314
+ M_r = length(X_uniq_r)
315
+ M = M_l + M_r
316
+ st_numscalar("M_l", M_l); st_numscalar("M_r", M_r)
317
+ mass_l = 1-M_l/N_l
318
+ mass_r = 1-M_r/N_r
319
+ if (mass_l>=0.1 | mass_r>=0.1){
320
+ masspoints_found = 1
321
+ display("{err}Mass points detected in the running variable.")
322
+ if ("`masspoints'"=="adjust" & "`bwcheck'"=="0") bwcheck = 10
323
+ if ("`masspoints'"=="check") display("{err}Try using option {cmd:masspoints(adjust)}")
324
+ }
325
+ }
326
+
327
+ *if ("`masspoints'"=="adjust") mN = M
328
+
329
+
330
+ ***********************************************************************
331
+ ******** Computing bandwidth selector *********************************
332
+ ***********************************************************************
333
+ c_bw = `C_c'*BWp*mN^(-1/5)
334
+ if ("`masspoints'"=="adjust") c_bw = `C_c'*BWp*M^(-1/5)
335
+
336
+ if ("`bwrestrict'"=="on") {
337
+ bw_max = max((range_l,range_r))
338
+ c_bw = min((c_bw, bw_max))
339
+ }
340
+
341
+ if (bwcheck > 0) {
342
+ bwcheck_l = min((bwcheck, M_l))
343
+ bwcheck_r = min((bwcheck, M_r))
344
+ bw_min_l = abs(X_uniq_l:-c)[bwcheck_l] + 1e-8
345
+ bw_min_r = abs(X_uniq_r:-c)[bwcheck_r] + 1e-8
346
+ c_bw = max((c_bw, bw_min_l, bw_min_r))
347
+ }
348
+
349
+ c_bw_l = c_bw_r = c_bw
350
+
351
+
352
+ *** Step 1: d_bw
353
+ C_d_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=q+1, nu=q+1, o_B=q+2, h_V=c_bw_l, h_B=range_l+1e-8, 0, "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
354
+ C_d_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=q+1, nu=q+1, o_B=q+2, h_V=c_bw_r, h_B=range_r+1e-8, 0, "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
355
+
356
+ *printf("i=%g\n ",C_d_l[5])
357
+ *printf("i=%g\n ",C_d_r[5])
358
+
359
+
360
+ if (C_d_l[1]==. | C_d_l[2]==. | C_d_l[3]==. |C_d_r[1]==. | C_d_r[2]==. | C_d_r[3]==.) printf("{err}Invertibility problem in the computation of preliminary bandwidth. Try checking for mass points with option {cmd:masspoints(check)}.\n")
361
+ if (C_d_l[1]==0 | C_d_l[2]==0 | C_d_r[1]==0 | C_d_r[2]==0) printf("{err}Not enough variability to compute the preliminary bandwidth. Try checking for mass points with option {cmd:masspoints(check)}.\n")
362
+
363
+
364
+
365
+ *** TWO
366
+ if ("`bwselect'"=="msetwo" | "`bwselect'"=="certwo" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2" | "`all'"!="") {
367
+ d_bw_l = ( (C_d_l[1] / C_d_l[2]^2) * (N/mN) )^C_d_l[4]
368
+ d_bw_r = ( (C_d_r[1] / C_d_r[2]^2) * (N/mN) )^C_d_l[4]
369
+ if ("`bwrestrict'"=="on") {
370
+ d_bw_l = min((d_bw_l, range_l))
371
+ d_bw_r = min((d_bw_r, range_r))
372
+ }
373
+ if (bwcheck > 0) {
374
+ d_bw_l = max((d_bw_l, bw_min_l))
375
+ d_bw_r = max((d_bw_r, bw_min_r))
376
+ }
377
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_l, h_B=d_bw_l, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
378
+ b_bw_l = ( (C_b_l[1] / (C_b_l[2]^2 + `scaleregul'*C_b_l[3])) * (N/mN) )^C_b_l[4]
379
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_r, h_B=d_bw_r, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
380
+ b_bw_r = ( (C_b_r[1] / (C_b_r[2]^2 + `scaleregul'*C_b_r[3])) * (N/mN) )^C_b_l[4]
381
+ if ("`bwrestrict'"=="on") {
382
+ b_bw_l = min((b_bw_l, range_l))
383
+ b_bw_r = min((b_bw_r, range_r))
384
+ }
385
+ *if ("`bwcheck'" != "0") {
386
+ * b_bw_l = max((b_bw_l, bw_min_l))
387
+ * b_bw_r = max((b_bw_r, bw_min_r))
388
+ *}
389
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_l, h_B=b_bw_l, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
390
+ h_bw_l = ( (C_h_l[1] / (C_h_l[2]^2 + `scaleregul'*C_h_l[3])) * (N/mN) )^C_h_l[4]
391
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_r, h_B=b_bw_r, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
392
+ h_bw_r = ( (C_h_r[1] / (C_h_r[2]^2 + `scaleregul'*C_h_r[3])) * (N/mN) )^C_h_l[4]
393
+
394
+ if ("`bwrestrict'"=="on") {
395
+ h_bw_l = min((h_bw_l, range_l))
396
+ h_bw_r = min((h_bw_r, range_r))
397
+ }
398
+ *if ("`bwcheck'" != "0") {
399
+ * h_bw_l = max((h_bw_l, bw_min_l))
400
+ * h_bw_r = max((h_bw_r, bw_min_r))
401
+ *}
402
+ }
403
+
404
+ *** SUM
405
+ if ("`bwselect'"=="msesum" | "`bwselect'"=="cersum" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2" | "`all'"!="") {
406
+ d_bw_s = ( ((C_d_l[1] + C_d_r[1]) / (C_d_r[2] + C_d_l[2])^2) * (N/mN) )^C_d_l[4]
407
+ if ("`bwrestrict'"=="on") d_bw_s = min((d_bw_s, bw_max))
408
+ if (bwcheck > 0) d_bw_s = max((d_bw_s, bw_min_l, bw_min_r))
409
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_l, h_B=d_bw_s, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
410
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_r, h_B=d_bw_s, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
411
+ b_bw_s = ( ((C_b_l[1] + C_b_r[1]) / ((C_b_r[2] + C_b_l[2])^2 + `scaleregul'*(C_b_r[3]+C_b_l[3]))) * (N/mN) )^C_b_l[4]
412
+ if ("`bwrestrict'"=="on") b_bw_s = min((b_bw_s, bw_max))
413
+ *if ("`bwcheck'" != "0") b_bw_s = max((b_bw_s, bw_min_l, bw_min_r))
414
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_l, h_B=b_bw_s, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
415
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_r, h_B=b_bw_s, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
416
+ h_bw_s = ( ((C_h_l[1] + C_h_r[1]) / ((C_h_r[2] + C_h_l[2])^2 + `scaleregul'*(C_h_r[3] + C_h_l[3]))) * (N/mN) )^C_h_l[4]
417
+ if ("`bwrestrict'"=="on") h_bw_s = min((h_bw_s, bw_max))
418
+ *if ("`bwcheck'" != "0") h_bw_s = max((h_bw_s, bw_min_l, bw_min_r))
419
+ }
420
+
421
+ *** RD
422
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="cerrd" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2" | "`bwselect'"=="" | "`all'"!="" ) {
423
+ d_bw_d = ( ((C_d_l[1] + C_d_r[1]) / (C_d_r[2] - C_d_l[2])^2) * (N/mN) )^C_d_l[4]
424
+ if ("`bwrestrict'"=="on") d_bw_d = min((d_bw_d, bw_max))
425
+ if (bwcheck > 0) d_bw_d = max((d_bw_d, bw_min_l, bw_min_r))
426
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_l, h_B=d_bw_d, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
427
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=q, nu=p+1, o_B=q+1, h_V=c_bw_r, h_B=d_bw_d, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
428
+ b_bw_d = ( ((C_b_l[1] + C_b_r[1]) / ((C_b_r[2] - C_b_l[2])^2 + `scaleregul'*(C_b_r[3] + C_b_l[3]))) * (N/mN) )^C_b_l[4]
429
+ if ("`bwrestrict'"=="on") b_bw_d = min((b_bw_d, bw_max))
430
+ *if ("`bwcheck'" != "0") b_bw_d = max((b_bw_d, bw_min_l, bw_min_r))
431
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_l, h_B=b_bw_d, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_l, dupsid_l, covs_drop_coll)
432
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=p, nu=`deriv', o_B=q, h_V=c_bw_r, h_B=b_bw_d, `scaleregul', "`vce_select'", nnmatch, "`kernel'", dups_r, dupsid_r, covs_drop_coll)
433
+ h_bw_d = ( ((C_h_l[1] + C_h_r[1]) / ((C_h_r[2] - C_h_l[2])^2 + `scaleregul'*(C_h_r[3] + C_h_l[3]))) * (N/mN) )^C_h_l[4]
434
+ if ("`bwrestrict'"=="on") h_bw_d = min((h_bw_d, bw_max))
435
+
436
+ *if ("`bwcheck'" != "0") h_bw_d = max((h_bw_d, bw_min_l, bw_min_r))
437
+ }
438
+
439
+
440
+
441
+ if (C_b_l[1]==0 | C_b_l[2]==0 | C_b_r[1]==0 | C_b_r[2]==0 |C_b_l[1]==. | C_b_l[2]==. | C_b_l[3]==. | C_b_r[1]==. | C_b_r[2]==. | C_b_r[3]==.) printf("{err}Not enough variability to compute the bias bandwidth (b). Try checking for mass points with option {cmd:masspoints(check)}. \n")
442
+ if (C_h_l[1]==0 | C_h_l[2]==0 | C_h_r[1]==0 | C_h_r[2]==0 |C_h_l[1]==. | C_h_l[2]==. | C_h_l[3]==. | C_h_r[1]==. | C_h_r[2]==. | C_h_r[3]==.) printf("{err}Not enough variability to compute the loc. poly. bandwidth (h). Try checking for mass points with option {cmd:masspoints(check)}.\n")
443
+
444
+ st_numscalar("N", N)
445
+ st_numscalar("N_l", N_l)
446
+ st_numscalar("N_r", N_r)
447
+ st_numscalar("x_l_min", x_sd*x_l_min)
448
+ st_numscalar("x_l_max", x_sd*x_l_max)
449
+ st_numscalar("x_r_min", x_sd*x_r_min)
450
+ st_numscalar("x_r_max", x_sd*x_r_max)
451
+ st_numscalar("masspoints_found", masspoints_found)
452
+
453
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="cerrd" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2" | "`bwselect'"=="" | "`all'"!="" ) {
454
+ h_mserd = x_sd*h_bw_d
455
+ b_mserd = x_sd*b_bw_d
456
+ st_numscalar("h_mserd", h_mserd); st_numscalar("b_mserd", b_mserd)
457
+ }
458
+ if ("`bwselect'"=="msesum" | "`bwselect'"=="cersum" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2" | "`all'"!="") {
459
+ h_msesum = x_sd*h_bw_s
460
+ b_msesum = x_sd*b_bw_s
461
+ st_numscalar("h_msesum", h_msesum); st_numscalar("b_msesum", b_msesum)
462
+ }
463
+ if ("`bwselect'"=="msetwo" | "`bwselect'"=="certwo" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2" | "`all'"!="") {
464
+ h_msetwo_l = x_sd*h_bw_l
465
+ h_msetwo_r = x_sd*h_bw_r
466
+ b_msetwo_l = x_sd*b_bw_l
467
+ b_msetwo_r = x_sd*b_bw_r
468
+ st_numscalar("h_msetwo_l", h_msetwo_l); st_numscalar("h_msetwo_r", h_msetwo_r)
469
+ st_numscalar("b_msetwo_l", b_msetwo_l); st_numscalar("b_msetwo_r", b_msetwo_r)
470
+ }
471
+ if ("`bwselect'"=="msecomb1" | "`bwselect'"=="cercomb1" | "`all'"!="" ) {
472
+ h_msecomb1 = min((h_mserd,h_msesum))
473
+ b_msecomb1 = min((b_mserd,b_msesum))
474
+ st_numscalar("h_msecomb1", h_msecomb1); st_numscalar("b_msecomb1", b_msecomb1)
475
+ }
476
+ if ("`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2" | "`all'"!="" ) {
477
+ h_msecomb2_l = (sort((h_mserd,h_msesum,h_msetwo_l)',1))[2]
478
+ h_msecomb2_r = (sort((h_mserd,h_msesum,h_msetwo_r)',1))[2]
479
+ b_msecomb2_l = (sort((b_mserd,b_msesum,b_msetwo_l)',1))[2]
480
+ b_msecomb2_r = (sort((b_mserd,b_msesum,b_msetwo_r)',1))[2]
481
+ st_numscalar("h_msecomb2_l", h_msecomb2_l); st_numscalar("h_msecomb2_r", h_msecomb2_r);
482
+ st_numscalar("b_msecomb2_l", b_msecomb2_l); st_numscalar("b_msecomb2_r", b_msecomb2_r);
483
+ }
484
+
485
+ cer_h = N^(-(`p'/((3+`p')*(3+2*`p'))))
486
+ if ("`cluster'"!="") cer_h = (g_l+g_r)^(-(`p'/((3+`p')*(3+2*`p'))))
487
+ cer_b = 1
488
+
489
+ if ("`bwselect'"=="cerrd" | "`all'"!="" ){
490
+ h_cerrd = h_mserd*cer_h
491
+ b_cerrd = b_mserd*cer_b
492
+ st_numscalar("h_cerrd", h_cerrd); st_numscalar("b_cerrd", b_cerrd)
493
+ }
494
+ if ("`bwselect'"=="cersum" | "`all'"!="" ){
495
+ h_cersum = h_msesum*cer_h
496
+ b_cersum= b_msesum*cer_b
497
+ st_numscalar("h_cersum", h_cersum); st_numscalar("b_cersum", b_cersum)
498
+ }
499
+ if ("`bwselect'"=="certwo" | "`all'"!="" ){
500
+ h_certwo_l = h_msetwo_l*cer_h
501
+ h_certwo_r = h_msetwo_r*cer_h
502
+ b_certwo_l = b_msetwo_l*cer_b
503
+ b_certwo_r = b_msetwo_r*cer_b
504
+ st_numscalar("h_certwo_l", h_certwo_l); st_numscalar("h_certwo_r", h_certwo_r);
505
+ st_numscalar("b_certwo_l", b_certwo_l); st_numscalar("b_certwo_r", b_certwo_r);
506
+ }
507
+ if ("`bwselect'"=="cercomb1" | "`all'"!="" ){
508
+ h_cercomb1 = h_msecomb1*cer_h
509
+ b_cercomb1 = b_msecomb1*cer_b
510
+ st_numscalar("h_cercomb1", h_cercomb1); st_numscalar("b_cercomb1", b_cercomb1)
511
+ }
512
+ if ("`bwselect'"=="cercomb2" | "`all'"!="" ){
513
+ h_cercomb2_l = h_msecomb2_l*cer_h
514
+ h_cercomb2_r = h_msecomb2_r*cer_h
515
+ b_cercomb2_l = b_msecomb2_l*cer_b
516
+ b_cercomb2_r = b_msecomb2_r*cer_b
517
+ st_numscalar("h_cercomb2_l", h_cercomb2_l); st_numscalar("h_cercomb2_r", h_cercomb2_r);
518
+ st_numscalar("b_cercomb2_l", b_cercomb2_l); st_numscalar("b_cercomb2_r", b_cercomb2_r);
519
+ }
520
+ }
521
+
522
+ *******************************************************************************
523
+ disp ""
524
+ if ("`fuzzy'"=="") {
525
+ if ("`covs'"=="") {
526
+ if ("`deriv'"=="0") disp in yellow "Bandwidth estimators for sharp RD local polynomial regression."
527
+ else if ("`deriv'"=="1") disp in yellow "Bandwidth estimators for sharp kink RD local polynomial regression."
528
+ else disp in yellow "Bandwidth estimators for sharp RD local polynomial regression. Derivative of order " `deriv' "."
529
+ }
530
+ else {
531
+ if ("`deriv'"=="0") disp in yellow "Bandwidth estimators for covariate-adjusted sharp RD local polynomial regression."
532
+ else if ("`deriv'"=="1") disp in yellow "Bandwidth estimators for covariate-adjusted sharp kink RD local polynomial regression."
533
+ else disp in yellow "Bandwidth estimators for covariate-adjusted sharp RD local polynomial regression. Derivative of order " `deriv' "."
534
+ }
535
+ }
536
+ else {
537
+ if ("`covs'"=="") {
538
+ if ("`deriv'"=="0") disp in yellow "Bandwidth estimators for fuzzy RD local polynomial regression."
539
+ else if ("`deriv'"=="1") disp in yellow "Bandwidth estimators for fuzzy kink RD local polynomial regression."
540
+ else disp in yellow "Bandwidth estimators for fuzzy RD local polynomial regression. Derivative of order " `deriv' "."
541
+ }
542
+ else {
543
+ if ("`deriv'"=="0") disp in yellow "Bandwidth estimators for covariate-adjusted fuzzy RD local polynomial regression."
544
+ else if ("`deriv'"=="1") disp in yellow "Bandwidth estimators for covariate-adjusted fuzzy kink RD local polynomial regression."
545
+ else disp in yellow "Bandwidth estimators for covariate-adjusted fuzzy RD local polynomial regression. Derivative of order " `deriv' "."
546
+ }
547
+ }
548
+ disp ""
549
+
550
+ disp in smcl in gr "{ralign 18: Cutoff c = `c_orig'}" _col(19) " {c |} " _col(21) in gr "Left of " in yellow "c" _col(33) in gr "Right of " in yellow "c" _col(55) in gr "Number of obs = " in yellow %10.0f scalar(N)
551
+ disp in smcl in gr "{hline 19}{c +}{hline 22}" _col(55) in gr "Kernel = " in yellow "{ralign 10:`kernel_type'}"
552
+ disp in smcl in gr "{ralign 18:Number of obs}" _col(19) " {c |} " _col(21) as result %9.0f scalar(N_l) _col(34) %9.0f scalar(N_r) _col(55) in gr "VCE method = " in yellow "{ralign 10:`vce_type'}"
553
+ disp in smcl in gr "{ralign 18:Min of `x'}" _col(19) " {c |} " _col(21) as result %9.3f scalar(x_l_min) _col(34) %9.3f scalar(x_r_min)
554
+ disp in smcl in gr "{ralign 18:Max of `x'}" _col(19) " {c |} " _col(21) as result %9.3f scalar(x_l_max) _col(34) %9.3f scalar(x_r_max)
555
+ disp in smcl in gr "{ralign 18:Order est. (p)}" _col(19) " {c |} " _col(21) as result %9.0f `p' _col(34) %9.0f `p'
556
+ disp in smcl in gr "{ralign 18:Order bias (q)}" _col(19) " {c |} " _col(21) as result %9.0f `q' _col(34) %9.0f `q'
557
+ if ("`masspoints'"=="check" | masspoints_found==1) disp in smcl in gr "{ralign 18:Unique obs}" _col(19) " {c |} " _col(21) as result %9.0f scalar(M_l) _col(34) %9.0f scalar(M_r)
558
+ if ("`cluster'"!="") disp in smcl in gr "{ralign 18:Number of clusters}" _col(19) " {c |} " _col(21) as result %9.0f scalar(g_l) _col(34) %9.0f scalar(g_r)
559
+
560
+
561
+ disp ""
562
+ if ("`fuzzy'"=="") disp "Outcome: `y'. Running variable: `x'."
563
+ else disp in yellow "Outcome: `y'. Running variable: `x'. Treatment Status: `fuzzyvar'."
564
+ disp in smcl in gr "{hline 19}{c TT}{hline 30}{c TT}{hline 29}"
565
+ disp in smcl in gr _col(19) " {c |} " _col(30) "BW est. (h)" _col(50) " {c |} " _col(60) "BW bias (b)"
566
+ disp in smcl in gr "{ralign 18:Method}" _col(19) " {c |} " _col(22) "Left of " in yellow "c" _col(40) in green "Right of " in yellow "c" in green _col(50) " {c |} " _col(53) "Left of " in yellow "c" _col(70) in green "Right of " in yellow "c"
567
+ disp in smcl in gr "{hline 19}{c +}{hline 30}{c +}{hline 29}"
568
+
569
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="" | "`all'"!="" ) {
570
+ disp in smcl in gr "{ralign 18:mserd}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_mserd) _col(41) %9.3f scalar(h_mserd) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_mserd) _col(71) %9.3f scalar(b_mserd)
571
+ }
572
+ if ("`bwselect'"=="msetwo" | "`all'"!="") {
573
+ disp in smcl in gr "{ralign 18:msetwo}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_msetwo_l) _col(41) %9.3f scalar(h_msetwo_r) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_msetwo_l) _col(71) %9.3f scalar(b_msetwo_r)
574
+ }
575
+ if ("`bwselect'"=="msesum" | "`all'"!="") {
576
+ disp in smcl in gr "{ralign 18:msesum}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_msesum) _col(41) %9.3f scalar(h_msesum) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_msesum) _col(71) %9.3f scalar(b_msesum)
577
+ }
578
+ if ("`bwselect'"=="msecomb1" | "`all'"!="" ) {
579
+ disp in smcl in gr "{ralign 18:msecomb1}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_msecomb1) _col(41) %9.3f scalar(h_msecomb1) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_msecomb1) _col(71) %9.3f scalar(b_msecomb1)
580
+ }
581
+ if ("`bwselect'"=="msecomb2" | "`all'"!="" ) {
582
+ disp in smcl in gr "{ralign 18:msecomb2}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_msecomb2_l) _col(41) %9.3f scalar(h_msecomb2_r) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_msecomb2_l) _col(71) %9.3f scalar(b_msecomb2_r)
583
+ }
584
+ if ("`all'"!="" ) disp in smcl in gr "{hline 19}{c +}{hline 30}{c +}{hline 29}"
585
+ if ("`bwselect'"=="cerrd" | "`all'"!="" ){
586
+ disp in smcl in gr "{ralign 18:cerrd}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_cerrd) _col(41) %9.3f scalar(h_cerrd) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_cerrd) _col(71) %9.3f scalar(b_cerrd)
587
+ }
588
+ if ("`bwselect'"=="certwo" | "`all'"!="" ){
589
+ disp in smcl in gr "{ralign 18:certwo}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_certwo_l) _col(41) %9.3f scalar(h_certwo_r) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_certwo_l) _col(71) %9.3f scalar(b_certwo_r)
590
+ }
591
+ if ("`bwselect'"=="cersum" | "`all'"!="" ){
592
+ disp in smcl in gr "{ralign 18:cersum}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_cersum) _col(41) %9.3f scalar(h_cersum) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_cersum) _col(71) %9.3f scalar(b_cersum)
593
+ }
594
+ if ("`bwselect'"=="cercomb1" | "`all'"!="" ){
595
+ disp in smcl in gr "{ralign 18:cercomb1}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_cercomb1) _col(41) %9.3f scalar(h_cercomb1) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_cercomb1) _col(71) %9.3f scalar(b_cercomb1)
596
+ }
597
+ if ("`bwselect'"=="cercomb2" | "`all'"!="" ){
598
+ disp in smcl in gr "{ralign 18:cercomb2}" _col(19) " {c |} " _col(22) as result %9.3f scalar(h_cercomb2_l) _col(41) %9.3f scalar(h_cercomb2_r) in green _col(50) " {c |} " _col(51) as result %9.3f scalar(b_cercomb2_l) _col(71) %9.3f scalar(b_cercomb2_r)
599
+ }
600
+ disp in smcl in gr "{hline 19}{c BT}{hline 30}{c BT}{hline 29}"
601
+ if ("`covs'"!="") di "Covariate-adjusted estimates. Additional covariates included: `ncovs'"
602
+ * if (`covs_drop_coll'>=1) di "Variables dropped due to multicollinearity."
603
+ if ("`masspoints'"=="check") di "Running variable checked for mass points."
604
+ if ("`masspoints'"=="adjust" & masspoints_found==1) di "Estimates adjusted for mass points in the running variable."
605
+
606
+ if ("`cluster'"!="") di "Std. Err. adjusted for clusters in " "`clustvar'"
607
+ if ("`scaleregul'"!="1") di "Scale regularization: " `scaleregul'
608
+ if ("`sharpbw'"~="") di in red "WARNING: bandwidths automatically computed for sharp RD estimation."
609
+ if ("`perf_comp'"~="") di in red "WARNING: bandwidths automatically computed for sharp RD estimation because perfect compliance was detected on at least one side of the threshold."
610
+
611
+ restore
612
+ ereturn clear
613
+ ereturn scalar N_l = scalar(N_l)
614
+ ereturn scalar N_r = scalar(N_r)
615
+ ereturn scalar c = `c'
616
+ ereturn scalar p = `p'
617
+ ereturn scalar q = `q'
618
+ ereturn local kernel = "`kernel_type'"
619
+ ereturn local bwselect = "`bwselect'"
620
+ ereturn local vce_select = "`vce_type'"
621
+ if ("`covs'"!="") ereturn local covs "`covs'"
622
+ if ("`cluster'"!="") ereturn local clustvar "`clustvar'"
623
+ ereturn local outcomevar "`y'"
624
+ ereturn local runningvar "`x'"
625
+ ereturn local depvar "`y'"
626
+ ereturn local cmd "rdbwselect"
627
+
628
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="" | "`all'"!="" ) {
629
+ ereturn scalar h_mserd = scalar(h_mserd)
630
+ ereturn scalar b_mserd = scalar(b_mserd)
631
+ }
632
+ if ("`bwselect'"=="msesum" | "`all'"!="") {
633
+ ereturn scalar h_msesum = scalar(h_msesum)
634
+ ereturn scalar b_msesum = scalar(b_msesum)
635
+ }
636
+ if ("`bwselect'"=="msetwo" | "`all'"!="") {
637
+ ereturn scalar h_msetwo_l = scalar(h_msetwo_l)
638
+ ereturn scalar h_msetwo_r = scalar(h_msetwo_r)
639
+ ereturn scalar b_msetwo_l = scalar(b_msetwo_l)
640
+ ereturn scalar b_msetwo_r = scalar(b_msetwo_r)
641
+ }
642
+ if ("`bwselect'"=="msecomb1" | "`all'"!="" ) {
643
+ ereturn scalar h_msecomb1 = scalar(h_msecomb1)
644
+ ereturn scalar b_msecomb1 = scalar(b_msecomb1)
645
+ }
646
+ if ("`bwselect'"=="msecomb2" | "`all'"!="" ) {
647
+ ereturn scalar h_msecomb2_l = scalar(h_msecomb2_l)
648
+ ereturn scalar h_msecomb2_r = scalar(h_msecomb2_r)
649
+ ereturn scalar b_msecomb2_l = scalar(b_msecomb2_l)
650
+ ereturn scalar b_msecomb2_r = scalar(b_msecomb2_r)
651
+ }
652
+ if ("`bwselect'"=="cerrd" | "`all'"!="") {
653
+ ereturn scalar h_cerrd = scalar(h_cerrd)
654
+ ereturn scalar b_cerrd = scalar(b_cerrd)
655
+ }
656
+ if ("`bwselect'"=="cersum" | "`all'"!="") {
657
+ ereturn scalar h_cersum = scalar(h_cersum)
658
+ ereturn scalar b_cersum = scalar(b_cersum)
659
+ }
660
+ if ("`bwselect'"=="certwo" | "`all'"!="") {
661
+ ereturn scalar h_certwo_l = scalar(h_certwo_l)
662
+ ereturn scalar h_certwo_r = scalar(h_certwo_r)
663
+ ereturn scalar b_certwo_l = scalar(b_certwo_l)
664
+ ereturn scalar b_certwo_r = scalar(b_certwo_r)
665
+ }
666
+ if ("`bwselect'"=="cercomb1" | "`all'"!="") {
667
+ ereturn scalar h_cercomb1 = scalar(h_cercomb1)
668
+ ereturn scalar b_cercomb1 = scalar(b_cercomb1)
669
+ }
670
+ if ("`bwselect'"=="cercomb2" | "`all'"!="") {
671
+ ereturn scalar h_cercomb2_l = scalar(h_cercomb2_l)
672
+ ereturn scalar h_cercomb2_r = scalar(h_cercomb2_r)
673
+ ereturn scalar b_cercomb2_l = scalar(b_cercomb2_l)
674
+ ereturn scalar b_cercomb2_r = scalar(b_cercomb2_r)
675
+ }
676
+
677
+ mata mata clear
678
+
679
+ end
30/replication_package/Adofiles/rd_2021/rdbwselect.sthlp ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *!version 8.1.0 2021-02-22}{...}
3
+ {viewerjumpto "Syntax" "rdbwselect##syntax"}{...}
4
+ {viewerjumpto "Description" "rdbwselect##description"}{...}
5
+ {viewerjumpto "Options" "rdbwselect##options"}{...}
6
+ {viewerjumpto "Examples" "rdbwselect##examples"}{...}
7
+ {viewerjumpto "Stored results" "rdbwselect##stored_results"}{...}
8
+ {viewerjumpto "References" "rdbwselect##references"}{...}
9
+ {viewerjumpto "Authors" "rdbwselect##authors"}{...}
10
+
11
+ {title:Title}
12
+
13
+ {p 4 8}{cmd:rdbwselect} {hline 2} Bandwidth Selection Procedures for Local Polynomial Regression Discontinuity Estimators.{p_end}
14
+
15
+ {marker syntax}{...}
16
+ {title:Syntax}
17
+
18
+ {p 4 8}{cmd:rdbwselect } {it:depvar} {it:indepvar} {ifin}
19
+ [{cmd:,}
20
+ {cmd:c(}{it:#}{cmd:)}
21
+ {cmd:fuzzy(}{it:fuzzyvar [sharpbw]}{cmd:)}
22
+ {cmd:deriv(}{it:#}{cmd:)}
23
+ {cmd:p(}{it:#}{cmd:)}
24
+ {cmd:q(}{it:#}{cmd:)}
25
+ {cmd:covs(}{it:covars}{cmd:)}
26
+ {cmd:covs_drop(}{it:covsdropoption}{cmd:)}
27
+ {cmd:kernel(}{it:kernelfn}{cmd:)}
28
+ {cmd:weights(}{it:weightsvar}{cmd:)}
29
+ {cmd:bwselect(}{it:bwmethod}{cmd:)}
30
+ {cmd:all}
31
+ {cmd:scaleregul(}{it:#}{cmd:)}
32
+ {cmd:masspoints(}{it:masspointsoption}{cmd:)}
33
+ {cmd:bwcheck(}{it:bwcheck}{cmd:)}
34
+ {cmd:bwrestrict(}{it:bwropt}{cmd:)}
35
+ {cmd:stdvars(}{it:stdopt}{cmd:)}
36
+ {cmd:vce(}{it:vcetype [vceopt1 vceopt2]}{cmd:)}
37
+ ]{p_end}
38
+
39
+ {synoptset 28 tabbed}{...}
40
+
41
+ {marker description}{...}
42
+ {title:Description}
43
+
44
+ {p 4 8}{cmd:rdbwselect} implements bandwidth selectors for local polynomial Regression Discontinuity (RD) point estimators and inference procedures developed in
45
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Calonico, Cattaneo and Titiunik (2014a)},
46
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":Calonico, Cattaneo and Farrell (2018)},
47
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Calonico, Cattaneo, Farrell and Titiunik (2019)},
48
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Calonico, Cattaneo and Farrell (2020)}.{p_end}
49
+
50
+ {p 8 8} Companion commands are: {help rdrobust:rdrobust} for point estimation and inference procedures, and {help rdplot:rdplot} for data-driven RD plots (see
51
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Calonico, Cattaneo and Titiunik (2015a)} for details).{p_end}
52
+
53
+ {p 8 8}A detailed introduction to this command is given in
54
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Calonico, Cattaneo and Titiunik (2014b)},
55
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":Calonico, Cattaneo, Farrell and Titiunik (2017)}. A companion {browse "www.r-project.org":R} package is also described in
56
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":Calonico, Cattaneo and Titiunik (2015b)}.{p_end}
57
+
58
+ {p 4 8}Related Stata and R packages useful for inference in RD designs are described in the following website:{p_end}
59
+
60
+ {p 8 8}{browse "https://rdpackages.github.io/":https://rdpackages.github.io/}{p_end}
61
+
62
+
63
+ {marker options}{...}
64
+ {title:Options}
65
+
66
+ {dlgtab:Estimand}
67
+
68
+ {p 4 8}{cmd:c(}{it:#}{cmd:)} specifies the RD cutoff for {it:indepvar}.
69
+ Default is {cmd:c(0)}.{p_end}
70
+
71
+ {p 4 8}{cmd:fuzzy(}{it:fuzzyvar [sharpbw]}{cmd:)} specifies the treatment status variable used to implement fuzzy RD estimation (or Fuzzy Kink RD if {cmd:deriv(1)} is also specified).
72
+ Default is Sharp RD design and hence this option is not used.
73
+ If the option {it:sharpbw} is set, the fuzzy RD estimation is performed using a bandwidth selection procedure for the sharp RD model. This option is automatically selected if there is perfect compliance at either side of the threshold.
74
+ {p_end}
75
+
76
+ {p 4 8}{cmd:deriv(}{it:#}{cmd:)} specifies the order of the derivative of the regression functions to be estimated.
77
+ Default is {cmd:deriv(0)} (for Sharp RD, or for Fuzzy RD if {cmd:fuzzy(.)} is also specified). Setting {cmd:deriv(1)} results in estimation of a Kink RD design (up to scale), or Fuzzy Kink RD if {cmd:fuzzy(.)} is also specified.{p_end}
78
+
79
+ {dlgtab:Local Polynomial Regression}
80
+
81
+ {p 4 8}{cmd:p(}{it:#}{cmd:)} specifies the order of the local polynomial used to construct the point estimator.
82
+ Default is {cmd:p(1)} (local linear regression).{p_end}
83
+
84
+ {p 4 8}{cmd:q(}{it:#}{cmd:)} specifies the order of the local polynomial used to construct the bias correction.
85
+ Default is {cmd:q(2)} (local quadratic regression).{p_end}
86
+
87
+ {p 4 8}{cmd:covs(}{it:covars}{cmd:)} specifies additional covariates to be used for estimation and inference.{p_end}
88
+
89
+ {p 4 8}{cmd:covs_drop(}{it:covsdropoption}{cmd:)} assesses collinearity in additional covariates used for estimation and inference. Options {opt pinv} (default choice) and {opt invsym} drop collinear additional covariates, differing only in the type of inverse function used. Option {opt off} only checks for collinear additional covariates but does not drop them.{p_end}
90
+
91
+ {p 4 8}{cmd:kernel(}{it:kernelfn}{cmd:)} specifies the kernel function used to construct the local-polynomial estimator(s). Options are: {opt tri:angular}, {opt epa:nechnikov}, and {opt uni:form}.
92
+ Default is {cmd:kernel(triangular)}.{p_end}
93
+
94
+ {p 4 8}{cmd:weights(}{it:weightsvar}{cmd:)} is the variable used for optional weighting of the estimation procedure. The unit-specific weights multiply the kernel function.{p_end}
95
+
96
+ {dlgtab:Bandwidth Selection}
97
+
98
+ {p 4 8}{cmd:bwselect(}{it:bwmethod}{cmd:)} specifies the bandwidth selection procedure to be used.
99
+ Options are:{p_end}
100
+ {p 8 12}{opt mserd} one common MSE-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
101
+ {p 8 12}{opt msetwo} two different MSE-optimal bandwidth selectors (below and above the cutoff) for the RD treatment effect estimator.{p_end}
102
+ {p 8 12}{opt msesum} one common MSE-optimal bandwidth selector for the sum of regression estimates (as opposed to difference thereof).{p_end}
103
+ {p 8 12}{opt msecomb1} for min({opt mserd},{opt msesum}).{p_end}
104
+ {p 8 12}{opt msecomb2} for median({opt msetwo},{opt mserd},{opt msesum}), for each side of the cutoff separately.{p_end}
105
+ {p 8 12}{opt cerrd} one common CER-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
106
+ {p 8 12}{opt certwo} two different CER-optimal bandwidth selectors (below and above the cutoff) for the RD treatment effect estimator.{p_end}
107
+ {p 8 12}{opt cersum} one common CER-optimal bandwidth selector for the sum of regression estimates (as opposed to difference thereof).{p_end}
108
+ {p 8 12}{opt cercomb1} for min({opt cerrd},{opt cersum}).{p_end}
109
+ {p 8 12}{opt cercomb2} for median({opt certwo},{opt cerrd},{opt cersum}), for each side of the cutoff separately.{p_end}
110
+ {p 8 12}Note: MSE = Mean Square Error; CER = Coverage Error Rate.{p_end}
111
+ {p 8 12}Default is {cmd:bwselect(mserd)}. For details on implementation see
112
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Calonico, Cattaneo and Titiunik (2014a)},
113
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":Calonico, Cattaneo and Farrell (2018)},
114
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Calonico, Cattaneo, Farrell and Titiunik (2019)},
115
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Calonico, Cattaneo and Farrell (2020)},
116
+ and the companion software articles.{p_end}
117
+
118
+ {p 4 8}{cmd:all} if specified, {cmd:rdbwselect} reports all available bandwidth selection procedures.{p_end}
119
+
120
+ {p 4 8}{cmd:scaleregul(}{it:#}{cmd:)} specifies scaling factor for the regularization term added to the denominator of the bandwidth selectors. Setting {cmd:scaleregul(0)} removes the regularization term from the bandwidth selectors.
121
+ Default is {cmd:scaleregul(1)}.{p_end}
122
+
123
+ {p 4 8}{cmd:masspoints(}{it:masspointsoption}{cmd:)} checks and controls for repeated observations in the running variable.
124
+ Options are:{p_end}
125
+ {p 8 12}{opt off} ignores the presence of mass points. {p_end}
126
+ {p 8 12}{opt check} looks for and reports the number of unique observations at each side of the cutoff. {p_end}
127
+ {p 8 12}{opt adjust} controls that the preliminary bandwidths used in the calculations contain a minimal number of unique observations. By default it uses 10 observations, but it can be manually adjusted with the option {cmd:bwcheck}.{p_end}
128
+ {p 8 12} Default option is {cmd:masspoints(adjust)}.{p_end}
129
+
130
+ {p 4 8}{cmd:bwcheck(}{it:bwcheck}{cmd:)} if a positive integer is provided, the preliminary bandwidth used in the calculations is enlarged so that at least {it:bwcheck} unique observations are used. {p_end}
131
+
132
+ {p 4 8}{cmd:bwrestrict(}{it:bwropt}{cmd:)} if set {opt on}, computed bandwidths are restricted to lie within the range of {it:runvar}. Default is {opt on}.{p_end}
133
+
134
+ {p 4 8}{cmd:stdvars(}{it:stdopt}{cmd:)} if set {opt on}, {it:depvar} and {it:runvar} are standardized before computing the bandwidths. Default is {opt off}.{p_end}
135
+
136
+ {dlgtab:Variance-Covariance Estimation}
137
+
138
+ {p 4 8}{cmd:vce(}{it:vcetype [vceopt1 vceopt2]}{cmd:)} specifies the procedure used to compute the variance-covariance matrix estimator.
139
+ Options are:{p_end}
140
+ {p 8 12}{cmd:vce(nn }{it:[nnmatch]}{cmd:)} for heteroskedasticity-robust nearest neighbor variance estimator with {it:nnmatch} indicating the minimum number of neighbors to be used.{p_end}
141
+ {p 8 12}{cmd:vce(hc0)} for heteroskedasticity-robust plug-in residuals variance estimator without weights.{p_end}
142
+ {p 8 12}{cmd:vce(hc1)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc1} weights.{p_end}
143
+ {p 8 12}{cmd:vce(hc2)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc2} weights.{p_end}
144
+ {p 8 12}{cmd:vce(hc3)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc3} weights.{p_end}
145
+ {p 8 12}{cmd:vce(nncluster }{it:clustervar [nnmatch]}{cmd:)} for cluster-robust nearest neighbor variance estimation with {it:clustervar} indicating the cluster ID variable and {it:nnmatch} indicating the minimum number of neighbors to be used.{p_end}
146
+ {p 8 12}{cmd:vce(cluster }{it:clustervar}{cmd:)} for cluster-robust plug-in residuals variance estimation with degrees-of-freedom weights and {it:clustervar} indicating the cluster ID variable.{p_end}
147
+ {p 8 12}Default is {cmd:vce(nn 3)}.{p_end}
148
+
149
+ {hline}
150
+
151
+
152
+ {marker examples}{...}
153
+ {title:Example: Cattaneo, Frandsen and Titiunik (2015) Incumbency Data}
154
+
155
+
156
+ {p 4 8}Setup{p_end}
157
+ {p 8 8}{cmd:. use rdrobust_senate.dta}{p_end}
158
+
159
+ {p 4 8}MSE bandwidth selection procedure{p_end}
160
+ {p 8 8}{cmd:. rdbwselect vote margin}{p_end}
161
+
162
+ {p 4 8}All bandwidth selection procedures{p_end}
163
+ {p 8 8}{cmd:. rdbwselect vote margin, all}{p_end}
164
+
165
+
166
+ {marker stored_results}{...}
167
+ {title:Stored results}
168
+
169
+ {p 4 8}{cmd:rdbwselect} stores the following in {cmd:e()}:
170
+
171
+ {synoptset 20 tabbed}{...}
172
+ {p2col 5 20 24 2: Scalars}{p_end}
173
+ {synopt:{cmd:e(N_l)}}number of observations to the left of the cutoff{p_end}
174
+ {synopt:{cmd:e(N_r)}}number of observations to the right of the cutoff{p_end}
175
+ {synopt:{cmd:e(c)}}cutoff value{p_end}
176
+ {synopt:{cmd:e(p)}}order of the polynomial used for estimation of the regression function{p_end}
177
+ {synopt:{cmd:e(q)}}order of the polynomial used for estimation of the bias of the regression function estimator{p_end}
178
+
179
+ {synopt:{cmd:e(h_mserd)}} MSE-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
180
+ {synopt:{cmd:e(h_msetwo_l)}} MSE-optimal bandwidth selectors below the cutoff for the RD treatment effect estimator.{p_end}
181
+ {synopt:{cmd:e(h_msetwo_r)}} MSE-optimal bandwidth selectors above the cutoff for the RD treatment effect estimator.{p_end}
182
+ {synopt:{cmd:e(h_msesum)}} MSE-optimal bandwidth selector for the sum of regression estimates.{p_end}
183
+ {synopt:{cmd:e(h_msecomb1)}} for min({opt mserd},{opt msesum}).{p_end}
184
+ {synopt:{cmd:e(h_msecomb2_l)}} for median({opt msetwo},{opt mserd},{opt msesum}), below the cutoff.{p_end}
185
+ {synopt:{cmd:e(h_msecomb2_r)}} for median({opt msetwo},{opt mserd},{opt msesum}), above the cutoff.{p_end}
186
+
187
+ {synopt:{cmd:e(h_cerrd)}} CER-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
188
+ {synopt:{cmd:e(h_certwo_l)}} CER-optimal bandwidth selectors below the cutoff for the RD treatment effect estimator.{p_end}
189
+ {synopt:{cmd:e(h_certwo_r)}} CER-optimal bandwidth selectors above the cutoff for the RD treatment effect estimator.{p_end}
190
+ {synopt:{cmd:e(h_cersum)}} CER-optimal bandwidth selector for the sum of regression estimates.{p_end}
191
+ {synopt:{cmd:e(h_cercomb1)}} for min({opt cerrd},{opt cersum}).{p_end}
192
+ {synopt:{cmd:e(h_cercomb2_l)}} for median({opt certwo_l},{opt cerrd},{opt cersum}), below the cutoff.{p_end}
193
+ {synopt:{cmd:e(h_cercomb2_r)}} for median({opt certwo_r},{opt cerrd},{opt cersum}), above the cutoff.{p_end}
194
+
195
+ {synopt:{cmd:e(b_mserd)}} MSE-optimal bandwidth selector for the bias of the RD treatment effect estimator.{p_end}
196
+ {synopt:{cmd:e(b_msetwo_l)}} MSE-optimal bandwidth selectors below the cutoff for the bias of the RD treatment effect estimator.{p_end}
197
+ {synopt:{cmd:e(b_msetwo_r)}} MSE-optimal bandwidth selectors above the cutoff for the bias of the RD treatment effect estimator.{p_end}
198
+ {synopt:{cmd:e(b_msesum)}} MSE-optimal bandwidth selector for the sum of regression estimates for the bias of the RD treatment effect estimator.{p_end}
199
+ {synopt:{cmd:e(b_msecomb1)}} for min({opt mserd},{opt msesum}).{p_end}
200
+ {synopt:{cmd:e(b_msecomb2_l)}} for median({opt msetwo},{opt mserd},{opt msesum}), below the cutoff.{p_end}
201
+ {synopt:{cmd:e(b_msecomb2_r)}} for median({opt msetwo},{opt mserd},{opt msesum}), above the cutoff.{p_end}
202
+
203
+ {synopt:{cmd:e(b_cerrd)}} CER-optimal bandwidth selector for the bias of the RD treatment effect estimator.{p_end}
204
+ {synopt:{cmd:e(b_certwo_l)}} CER-optimal bandwidth selectors below the cutoff for the bias of the RD treatment effect estimator.{p_end}
205
+ {synopt:{cmd:e(b_certwo_r)}} CER-optimal bandwidth selectors above the cutoff for the bias of the RD treatment effect estimator.{p_end}
206
+ {synopt:{cmd:e(b_cersum)}} CER-optimal bandwidth selector for the sum of regression estimates for the bias of the RD treatment effect estimator.{p_end}
207
+ {synopt:{cmd:e(b_cercomb1)}} for min({opt cerrd},{opt cersum}).{p_end}
208
+ {synopt:{cmd:e(b_cercomb2_l)}} for median({opt certwo_l},{opt cerrd},{opt cersum}), below the cutoff.{p_end}
209
+ {synopt:{cmd:e(b_cercomb2_r)}} for median({opt certwo_r},{opt cerrd},{opt cersum}), above the cutoff.{p_end}
210
+
211
+ {p2col 5 20 24 2: Macros}{p_end}
212
+ {synopt:{cmd:e(runningvar)}}name of running variable{p_end}
213
+ {synopt:{cmd:e(outcomevar)}}name of outcome variable{p_end}
214
+ {synopt:{cmd:e(clustvar)}}name of cluster variable{p_end}
215
+ {synopt:{cmd:e(covs)}}name of covariates{p_end}
216
+ {synopt:{cmd:e(vce_select)}}vcetype specified in vce(){p_end}
217
+ {synopt:{cmd:e(bwselect)}}bandwidth selection choice{p_end}
218
+ {synopt:{cmd:e(kernel)}}kernel choice{p_end}
219
+
220
+
221
+ {marker references}{...}
222
+ {title:References}
223
+
224
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2020.
225
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Optimal Bandwidth Choice for Robust Bias Corrected Inference in Regression Discontinuity Designs}.
226
+ {it:Econometrics Journal} 23(2): 192-210.{p_end}
227
+
228
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2018.
229
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":On the Effect of Bias Estimation on Coverage Accuracy in Nonparametric Inference}.
230
+ {it:Journal of the American Statistical Association} 113(522): 767-779.{p_end}
231
+
232
+ {p 4 8}Calonico, S., M. D. Cattaneo, M. H. Farrell, and R. Titiunik. 2019.
233
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Regression Discontinuity Designs using Covariates}.
234
+ {it:Review of Economics and Statistics}, 101(3): 442-451.{p_end}
235
+
236
+ {p 4 8}Calonico, S., M. D. Cattaneo, M. H. Farrell, and R. Titiunik. 2017.
237
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":rdrobust: Software for Regression Discontinuity Designs}.
238
+ {it:Stata Journal} 17(2): 372-404.{p_end}
239
+
240
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2014a.
241
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Robust Nonparametric Confidence Intervals for Regression-Discontinuity Designs}.
242
+ {it:Econometrica} 82(6): 2295-2326.{p_end}
243
+
244
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2014b.
245
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Robust Data-Driven Inference in the Regression-Discontinuity Design}.
246
+ {it:Stata Journal} 14(4): 909-946.{p_end}
247
+
248
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015a.
249
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Optimal Data-Driven Regression Discontinuity Plots}.
250
+ {it:Journal of the American Statistical Association} 110(512): 1753-1769.{p_end}
251
+
252
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015b.
253
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":rdrobust: An R Package for Robust Nonparametric Inference in Regression-Discontinuity Designs}.
254
+ {it:R Journal} 7(1): 38-51.{p_end}
255
+
256
+ {p 4 8}Cattaneo, M. D., B. Frandsen, and R. Titiunik. 2015.
257
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Randomization Inference in the Regression Discontinuity Design: An Application to Party Advantages in the U.S. Senate}.
258
+ {it:Journal of Causal Inference} 3(1): 1-24.{p_end}
259
+
260
+ {marker authors}{...}
261
+ {title:Authors}
262
+
263
+ {p 4 8}Sebastian Calonico, Columbia University, New York, NY.
264
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
265
+
266
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
267
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
268
+
269
+ {p 4 8}Max H. Farrell, University of Chicago, Chicago, IL.
270
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
271
+
272
+ {p 4 8}Rocio Titiunik, Princeton University, Princeton, NJ.
273
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
274
+
275
+
30/replication_package/Adofiles/rd_2021/rdbwselect_2014.ado ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *!version 6.0 2014-10-14
2
+
3
+ capture program drop rdbwselect_2014
4
+ program define rdbwselect_2014, eclass
5
+ syntax anything [if] [in] [, c(real 0) deriv(real 0) p(real 1) q(real 0) kernel(string) bwselect(string) rho(real 0) vce(string) matches(real 3) delta(real 0.5) cvgrid_min(real 0) cvgrid_max(real 0) cvgrid_length(real 0) cvplot all precalc scaleregul(real 1) ]
6
+
7
+ local kernel = lower("`kernel'")
8
+ local bwselect = upper("`bwselect'")
9
+ local vce = lower("`vce'")
10
+
11
+ marksample touse
12
+ preserve
13
+ qui keep if `touse'
14
+ tokenize "`anything'"
15
+ local y `1'
16
+ local x `2'
17
+ qui drop if `y'==. | `x'==.
18
+ tempvar x_l x_r y_l y_r
19
+ local b_calc = 0
20
+
21
+ if (`rho'==0){
22
+ local b_calc = 1
23
+ local rho = 1
24
+ }
25
+
26
+ qui su `x' if `x'<`c', d
27
+ local medX_l = r(p50)
28
+ qui su `x' if `x'>=`c', d
29
+ local medX_r = r(p50)
30
+
31
+ if ("`precalc'"==""){
32
+ qui gen `x_l' = `x' if `x'<`c'
33
+ qui gen `x_r' = `x' if `x'>=`c'
34
+ qui gen `y_l' = `y' if `x'<`c'
35
+ qui gen `y_r' = `y' if `x'>=`c'
36
+
37
+ qui su `x'
38
+ local x_min = r(min)
39
+ local x_max = r(max)
40
+ qui su `x_l',d
41
+ local N_l = r(N)
42
+ local range_l = abs(r(max)-r(min))
43
+ qui su `x_r',d
44
+ local N_r = r(N)
45
+ local range_r = abs(r(max)-r(min))
46
+ local N = `N_r' + `N_l'
47
+
48
+ if ("`deriv'">"0" & "`p'"=="1" & "`q'"=="0"){
49
+ local p = `deriv'+1
50
+ }
51
+
52
+ if ("`q'"=="0") {
53
+ local q = `p'+1
54
+ }
55
+
56
+
57
+ **************************** ERRORS
58
+ if (`c'<=`x_min' | `c'>=`x_max'){
59
+ di "{err}{cmd:c()} should be set within the range of `x'"
60
+ exit 125
61
+ }
62
+
63
+ if (`N_l'<20 | `N_r'<20){
64
+ di "{err}Not enough observations to perform calculations"
65
+ exit 2001
66
+ }
67
+
68
+ if ("`p'">"8"){
69
+ di "{err}{cmd:p()} should be less or equal than 8 for this version of the software package"
70
+ exit 125
71
+ }
72
+
73
+
74
+ if ("`kernel'"~="uni" & "`kernel'"~="uniform" & "`kernel'"~="tri" & "`kernel'"~="triangular" & "`kernel'"~="epa" & "`kernel'"~="epanechnikov" & "`kernel'"~="" ){
75
+ di "{err}{cmd:kernel()} incorrectly specified"
76
+ exit 7
77
+ }
78
+
79
+ if ("`bwselect'"~="CCT" & "`bwselect'"~="IK" & "`bwselect'"~="CV" & "`bwselect'"~=""){
80
+ di "{err}{cmd:bwselect()} incorrectly specified"
81
+ exit 7
82
+ }
83
+
84
+ if ("`vce'"~="resid" & "`vce'"~="nn" & "`vce'"~=""){
85
+ di "{err}{cmd:vce()} incorrectly specified"
86
+ exit 7
87
+ }
88
+
89
+ if ("`p'"<"0" | "`q'"<="0" | "`deriv'"<"0" | "`matches'"<="0" | `scaleregul'<0){
90
+ di "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()}, {cmd:matches()} and {cmd:scaleregul()} should be positive"
91
+ exit 411
92
+ }
93
+
94
+ if ("`p'">="`q'" & "`q'">"0"){
95
+ di "{err}{cmd:q()} should be higher than {cmd:p()}"
96
+ exit 125
97
+ }
98
+
99
+ if ("`deriv'">"`p'" & "`deriv'">"0" ){
100
+ di "{err}{cmd:deriv()} can not be higher than {cmd:p()}"
101
+ exit 125
102
+ }
103
+
104
+ if ("`p'">"0" ) {
105
+ local p_round = round(`p')/`p'
106
+ local q_round = round(`q')/`q'
107
+ local d_round = round(`deriv'+1)/(`deriv'+1)
108
+ local m_round = round(`matches')/`matches'
109
+
110
+ if (`p_round'!=1 | `q_round'!=1 |`d_round'!=1 |`m_round'!=1 ){
111
+ di "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()} and {cmd:matches()} should be integers"
112
+ exit 126
113
+ }
114
+ }
115
+
116
+ if (`delta'>1 | `delta'<=0){
117
+ di "{err}{cmd:delta()}should be set between 0 and 1"
118
+ exit 125
119
+ }
120
+
121
+ if (`rho'>1 | `rho'<0){
122
+ di "{err}{cmd:rho()}should be set between 0 and 1"
123
+ exit 125
124
+ }
125
+
126
+ if (`cvgrid_min'<0 | `cvgrid_max'<0 | `cvgrid_length'<0 ){
127
+ di "{err}{cmd:cvgrid_min()}, {cmd:cvgrid_max()} and {cmd:cvgrid_length()} should be positive numbers"
128
+ exit 126
129
+ }
130
+
131
+ if (`cvgrid_min'>`cvgrid_max' ){
132
+ di "{err}{cmd:cvgrid_min()} should be lower than {cmd:cvgrid_max()}"
133
+ exit 125
134
+ }
135
+
136
+ if (`deriv'>0 & ("`bwselect'"=="IK" | "`bwselect'"=="CV" | "`all'"!="")) {
137
+ di "{err}{cmd:IK} and {cmd:CV} implementations are not availale for {cmd:deriv}>0; use CCT instead"
138
+ exit 125
139
+ }
140
+
141
+ if ("`exit'">"0") {
142
+ exit
143
+ }
144
+
145
+ if ("`kernel'"=="epanechnikov" | "`kernel'"=="epa") {
146
+ local kernel_type = "Epanechnikov"
147
+ }
148
+ else if ("`kernel'"=="uniform" | "`kernel'"=="uni") {
149
+ local kernel_type = "Uniform"
150
+ }
151
+ else {
152
+ local kernel_type = "Triangular"
153
+ }
154
+ }
155
+
156
+ local p1 = `p' + 1
157
+ local p2 = `p' + 2
158
+ local q1 = `q' + 1
159
+ local q2 = `q' + 2
160
+ local q3 = `q' + 3
161
+ quietly count if `x'<`c'
162
+ local N_l = r(N)
163
+ quietly count if `c'<=`x'
164
+ local N_r = r(N)
165
+ local N = `N_r' + `N_l'
166
+ local m = `matches' + 1
167
+
168
+ if ("`kernel'"=="epanechnikov" | "`kernel'"=="epa") {
169
+ local kid=3
170
+ local C_pilot=2.34
171
+ }
172
+ else if ("`kernel'"=="uniform" | "`kernel'"=="uni") {
173
+ local kid=2
174
+ local C_pilot=1.84
175
+ }
176
+ else {
177
+ local kid=1
178
+ local C_pilot=2.58
179
+ }
180
+
181
+ rdbwselect_2014_kconst `p' `deriv' `kid'
182
+ local C1_h = e(C1)
183
+ local C2_h = e(C2)
184
+ rdbwselect_2014_kconst `q' `q' `kid'
185
+ local C1_b = e(C1)
186
+ local C2_b = e(C2)
187
+ rdbwselect_2014_kconst `q1' `q1' `kid'
188
+ local C1_q = e(C1)
189
+ local C2_q = e(C2)
190
+
191
+ rdbwselect_2014_kconst `q' `q' 2
192
+ local C1_b_uni = e(C1)
193
+ local C2_b_uni = e(C2)
194
+ rdbwselect_2014_kconst `q1' `q1' 2
195
+ local C1_q_uni = e(C1)
196
+ local C2_q_uni = e(C2)
197
+
198
+ ***********************************************************************
199
+ **************************** CCT Approach
200
+ ***********************************************************************
201
+ qui su `x', d
202
+ local h_pilot_CCT = `C_pilot'*min(r(sd),(r(p75)-r(p25))/1.349)*r(N)^(-1/5)
203
+
204
+ mata{
205
+ h_pilot_CCT=`h_pilot_CCT'
206
+ N_l = `N_l'
207
+ N_r = `N_r'
208
+ p = `p'
209
+ q = `q'
210
+ c = `c'
211
+ C1_h=`C1_h'
212
+ C2_h=`C2_h'
213
+ C1_b=`C1_b'
214
+ C2_b=`C2_b'
215
+ C1_q=`C1_q'
216
+ C2_q=`C2_q'
217
+
218
+ C1_b_uni=`C1_b_uni'
219
+ C2_b_uni=`C2_b_uni'
220
+ C1_q_uni=`C1_q_uni'
221
+ C2_q_uni=`C2_q_uni'
222
+
223
+ deriv = `deriv'
224
+ p1 = p+1; q1 = q+1; p2 = p+2; q2 = q+2; p3 = p+3; q3 = q+3
225
+ Y = st_data(.,("`y'"), 0); X = st_data(.,("`x'"), 0)
226
+ X_l = select(X,X:<c); X_r = select(X,X:>=c)
227
+ Y_l = select(Y,X:<c); Y_r = select(Y,X:>=c)
228
+ X_lq2 = J(N_l, q+3, .); X_rq2 = J(N_r, q+3, .)
229
+ for (j=1; j<=q3; j++) {
230
+ X_lq2[.,j] = (X_l:-c):^(j-1)
231
+ X_rq2[.,j] = (X_r:-c):^(j-1)
232
+ }
233
+
234
+ X_lq1 = X_lq2[.,1::q2];X_rq1 = X_rq2[.,1::q2]
235
+ X_lq = X_lq2[.,1::q1];X_rq = X_rq2[.,1::q1]
236
+ X_lp = X_lq2[.,1::p1];X_rp = X_rq2[.,1::p1]
237
+
238
+ if ("`bwselect'"=="CCT" | "`bwselect'"=="" | "`all'"!="") {
239
+
240
+ display("Computing CCT bandwidth selector.")
241
+
242
+ *** Step 1: q_CCT
243
+ * Variances for all CCT estimators
244
+ w_pilot_l = rdbwselect_2014_kweight(X_l,c,h_pilot_CCT,"`kernel'")
245
+ w_pilot_r = rdbwselect_2014_kweight(X_r,c,h_pilot_CCT,"`kernel'")
246
+ Gamma_pilot_lq1 = cross(X_lq1, w_pilot_l, X_lq1); Gamma_pilot_rq1 = cross(X_rq1, w_pilot_r, X_rq1)
247
+ Gamma_pilot_lq = Gamma_pilot_lq1[1::`q1',1::`q1']; Gamma_pilot_rq = Gamma_pilot_rq1[1::`q1',1::`q1']
248
+ Gamma_pilot_lp = Gamma_pilot_lq1[1::`p1',1::`p1']; Gamma_pilot_rp = Gamma_pilot_rq1[1::`p1',1::`p1']
249
+ invGamma_pilot_lq1 = invsym(Gamma_pilot_lq1); invGamma_pilot_rq1 = invsym(Gamma_pilot_rq1)
250
+ invGamma_pilot_lq = invsym(Gamma_pilot_lq); invGamma_pilot_rq = invsym(Gamma_pilot_rq)
251
+ invGamma_pilot_lp = invsym(Gamma_pilot_lp); invGamma_pilot_rp = invsym(Gamma_pilot_rp)
252
+ sigma_l_pilot = rdbwselect_2014_rdvce(X_l, Y_l, Y_l, `p', `h_pilot_CCT', `matches', "`vce'", "`kernel'")
253
+ sigma_r_pilot = rdbwselect_2014_rdvce(X_r, Y_r, Y_r, `p', `h_pilot_CCT', `matches', "`vce'", "`kernel'")
254
+ Psi_pilot_lq1 = cross(X_lq1, w_pilot_l:*sigma_l_pilot:*w_pilot_l, X_lq1)
255
+ Psi_pilot_rq1 = cross(X_rq1, w_pilot_r:*sigma_r_pilot:*w_pilot_r, X_rq1)
256
+ Psi_pilot_lq = Psi_pilot_lq1[1::`q1',1::`q1']; Psi_pilot_rq = Psi_pilot_rq1[1::`q1',1::`q1']
257
+ Psi_pilot_lp = Psi_pilot_lq1[1::`p1',1::`p1']; Psi_pilot_rp = Psi_pilot_rq1[1::`p1',1::`p1']
258
+ V_m3_pilot_CCT = (invGamma_pilot_lq1*Psi_pilot_lq1*invGamma_pilot_lq1)[`q'+2,`q'+2] + (invGamma_pilot_rq1*Psi_pilot_rq1*invGamma_pilot_rq1)[`q'+2,`q'+2]
259
+ V_m2_pilot_CCT = (invGamma_pilot_lq*Psi_pilot_lq*invGamma_pilot_lq)[`q'+1,`q'+1] + (invGamma_pilot_rq*Psi_pilot_rq*invGamma_pilot_rq)[`q'+1,`q'+1]
260
+ V_m0_pilot_CCT = (invGamma_pilot_lp*Psi_pilot_lp*invGamma_pilot_lp)[`deriv'+1,`deriv'+1] + (invGamma_pilot_rp*Psi_pilot_rp*invGamma_pilot_rp)[`deriv'+1,`deriv'+1]
261
+ * Numerator
262
+ N_q_CCT=(2*q+3)*`N'*`h_pilot_CCT'^(2*q+3)*V_m3_pilot_CCT
263
+ * Denominator
264
+ m4_l_pilot_CCT = (invsym(cross(X_lq2,X_lq2))*cross(X_lq2,Y_l))[`q3',1]
265
+ m4_r_pilot_CCT = (invsym(cross(X_rq2,X_rq2))*cross(X_rq2,Y_r))[`q3',1]
266
+ D_q_CCT = 2*(C1_q*(m4_r_pilot_CCT-(-1)^(deriv+q)*m4_l_pilot_CCT))^2
267
+ * Final
268
+ q_CCT = (N_q_CCT/(`N'*D_q_CCT))^(1/(2*q+5))
269
+
270
+ *** Step 2: b_CCT
271
+ * Numerator
272
+ N_b_CCT = (2*p+3)*`N'*`h_pilot_CCT'^(2*p+3)*V_m2_pilot_CCT
273
+ * Denominator
274
+ w_q_l=rdbwselect_2014_kweight(X_l,c,q_CCT,"`kernel'")
275
+ w_q_r=rdbwselect_2014_kweight(X_r,c,q_CCT,"`kernel'")
276
+ m3_l_CCT = (invsym(cross(X_lq1, w_q_l, X_lq1))*cross(X_lq1, w_q_l, Y_l))[q2,1]
277
+ m3_r_CCT = (invsym(cross(X_rq1, w_q_r, X_rq1))*cross(X_rq1, w_q_r, Y_r))[q2,1]
278
+ D_b_CCT = 2*(q-p)*(C1_b*(m3_r_CCT - (-1)^(deriv+q+1)*m3_l_CCT))^2
279
+ * Regul
280
+ invGamma_q_lq1_CCT = invsym(cross(X_lq1, w_q_l, X_lq1))
281
+ invGamma_q_rq1_CCT = invsym(cross(X_rq1, w_q_r, X_rq1))
282
+ Psi_q_lq1_CCT = cross(X_lq1, w_q_l:*sigma_l_pilot:*w_q_l, X_lq1)
283
+ Psi_q_rq1_CCT = cross(X_rq1, w_q_r:*sigma_r_pilot:*w_q_r, X_rq1)
284
+ V_m3_q_CCT = (invGamma_q_lq1_CCT*Psi_q_lq1_CCT*invGamma_q_lq1_CCT)[`q'+2,`q'+2] + (invGamma_q_rq1_CCT*Psi_q_rq1_CCT*invGamma_q_rq1_CCT)[`q'+2,`q'+2]
285
+ R_b_CCT = `scaleregul'*2*(q-p)*C1_b^2*3*V_m3_q_CCT
286
+ * Final
287
+ b_CCT = (N_b_CCT / (`N'*(D_b_CCT + R_b_CCT)))^(1/(2*q+3))
288
+
289
+ *** Step 3: h_CCT
290
+ * Numerator
291
+ N_h_CCT = (2*`deriv'+1)*`N'*`h_pilot_CCT'^(2*`deriv'+1)*V_m0_pilot_CCT
292
+ * Denominator
293
+ w_b_l=rdbwselect_2014_kweight(X_l,`c',b_CCT,"`kernel'")
294
+ w_b_r=rdbwselect_2014_kweight(X_r,`c',b_CCT,"`kernel'")
295
+ m2_l_CCT = (invsym(cross(X_lq, w_b_l, X_lq))*cross(X_lq, w_b_l, Y_l))[`p2',1]
296
+ m2_r_CCT = (invsym(cross(X_rq, w_b_r, X_rq))*cross(X_rq, w_b_r, Y_r))[`p2',1]
297
+ D_h_CCT = 2*(p+1-`deriv')*(C1_h*(m2_r_CCT - (-1)^(`deriv'+p+1)*m2_l_CCT))^2
298
+ * Regul
299
+ invGamma_b_lq_CCT = invsym(cross(X_lq, w_b_l, X_lq))
300
+ invGamma_b_rq_CCT = invsym(cross(X_rq, w_b_r, X_rq))
301
+ Psi_b_lq_CCT = cross(X_lq, w_b_l:*sigma_l_pilot:*w_b_l, X_lq)
302
+ Psi_b_rq_CCT = cross(X_rq, w_b_r:*sigma_r_pilot:*w_b_r, X_rq)
303
+ V_m2_b_CCT = (invGamma_b_lq_CCT*Psi_b_lq_CCT*invGamma_b_lq_CCT)[`p2',`p2'] + (invGamma_b_rq_CCT*Psi_b_rq_CCT*invGamma_b_rq_CCT)[`p2',`p2']
304
+ R_h_CCT = `scaleregul'*2*(`p'+1-`deriv')*C1_h^2*3*V_m2_b_CCT
305
+ * Final
306
+ h_CCT = (N_h_CCT / (`N'*(D_h_CCT+R_h_CCT)))^(1/(2*p+3))
307
+
308
+ st_numscalar("h_CCT",h_CCT)
309
+ st_numscalar("q_CCT",q_CCT)
310
+
311
+ if (`b_calc'==0) {
312
+ b_CCT = h_CCT/`rho'
313
+ }
314
+ st_numscalar("b_CCT",b_CCT)
315
+ }
316
+
317
+ ***************************************************************************************************
318
+ ******************** IK
319
+ **************************************************************************************************
320
+ if ("`bwselect'"=="IK" | "`all'"~="") {
321
+
322
+ display("Computing IK bandwidth selector.")
323
+ h_pilot_IK = 1.84*sqrt(variance(X))*length(X)^(-1/5)
324
+ n_l_h1 = length(select(X_l,X_l:>=`c'-h_pilot_IK))
325
+ n_r_h1 = length(select(X_r,X_r:<=`c'+h_pilot_IK))
326
+ f0_pilot=(n_r_h1+n_l_h1)/(2*`N'*h_pilot_IK)
327
+ s2_l_pilot = variance(select(Y_l,X_l:>=`c'-h_pilot_IK))
328
+ s2_r_pilot = variance(select(Y_r,X_r:<=`c'+h_pilot_IK))
329
+
330
+ if (s2_l_pilot==0){
331
+ s2_l_pilot=variance(select(Y_l,X_l:>=`c'-2*h_pilot_IK))
332
+ }
333
+
334
+ if (s2_r_pilot==0){
335
+ s2_r_pilot=variance(select(Y_r,X_r:<=`c'+2*h_pilot_IK))
336
+ }
337
+
338
+ V_IK_pilot = (s2_r_pilot+s2_l_pilot)/f0_pilot
339
+ Vm0_pilot_IK = C2_h*V_IK_pilot
340
+ Vm2_pilot_IK = C2_b*V_IK_pilot
341
+ Vm3_pilot_IK = C2_q*V_IK_pilot
342
+
343
+ * Select Median Sample to compute derivative (as in IK code)
344
+ x_IK_med_l = select(X_l,X_l:>=`medX_l'); y_IK_med_l = select(Y_l,X_l:>=`medX_l')
345
+ x_IK_med_r = select(X_r,X_r:<=`medX_r'); y_IK_med_r = select(Y_r,X_r:<=`medX_r')
346
+ x_IK_med = x_IK_med_r \ x_IK_med_l
347
+ y_IK_med = y_IK_med_r \ y_IK_med_l
348
+ sample_IK = length(x_IK_med)
349
+ X_IK_med_q2 = J(sample_IK, `q3', .)
350
+ for (j=1; j<= `q3' ; j++) {
351
+ X_IK_med_q2[.,j] = (x_IK_med:-`c'):^(j-1)
352
+ }
353
+ X_IK_med_q1 = X_IK_med_q2[.,1::`q2']
354
+ * Add cutoff dummy
355
+ X_IK_med_q2 = (X_IK_med_q2, x_IK_med:>=c)
356
+ X_IK_med_q1 = (X_IK_med_q1, x_IK_med:>=c)
357
+
358
+ *** Compute b_IK
359
+ * Pilot Bandwidth
360
+ N_q_r_pilot_IK = (2*q+3)*C2_q_uni*(s2_r_pilot/f0_pilot)
361
+ N_q_l_pilot_IK = (2*q+3)*C2_q_uni*(s2_l_pilot/f0_pilot)
362
+ m4_pilot_IK = (invsym(cross(X_IK_med_q2, X_IK_med_q2))*cross(X_IK_med_q2, y_IK_med))[q+3,1]
363
+ D_q_pilot_IK = 2*(C1_q_uni*m4_pilot_IK)^2
364
+ h3_r_pilot_IK = (N_q_r_pilot_IK / (N_r*D_q_pilot_IK))^(1/(2*q+5))
365
+ h3_l_pilot_IK = (N_q_l_pilot_IK / (N_l*D_q_pilot_IK))^(1/(2*q+5))
366
+ * Data for derivative
367
+ X_lq_IK_h3=select(X_lq1,X_l:>=c-h3_l_pilot_IK); Y_l_IK_h3 =select(Y_l,X_l:>=c-h3_l_pilot_IK)
368
+ X_rq_IK_h3=select(X_rq1,X_r:<=c+h3_r_pilot_IK); Y_r_IK_h3 =select(Y_r,X_r:<=c+h3_r_pilot_IK)
369
+ m3_l_IK = (invsym(cross(X_lq_IK_h3, X_lq_IK_h3))*cross(X_lq_IK_h3, Y_l_IK_h3))[q+2,1]
370
+ m3_r_IK = (invsym(cross(X_rq_IK_h3, X_rq_IK_h3))*cross(X_rq_IK_h3, Y_r_IK_h3))[q+2,1]
371
+ D_b_IK = 2*(q-p)*(C1_b*(m3_r_IK - (-1)^(`deriv'+`q'+1)*m3_l_IK))^2
372
+ N_b_IK = (2*p+3)*Vm2_pilot_IK
373
+ * Regularization
374
+ temp = rdbwselect_2014_regconst(`q1',1)
375
+ con = temp[`q2',`q2']
376
+ n_l_h3 = length(Y_l_IK_h3); n_r_h3 = length(Y_r_IK_h3)
377
+ r_l_b = (con*s2_l_pilot)/(n_l_h3*h3_l_pilot_IK^(2*`q1'))
378
+ r_r_b = (con*s2_r_pilot)/(n_r_h3*h3_r_pilot_IK^(2*`q1'))
379
+ R_b_IK = `scaleregul'*2*(q-p)*C1_b^2*3*(r_l_b + r_r_b)
380
+ * Final bandwidth:
381
+ b_IK = (N_b_IK / (`N'*(D_b_IK + R_b_IK)))^(1/(2*q+3))
382
+
383
+ *** Compute h_IK
384
+ * Pilot Bandwidth
385
+ N_b_r_pilot_IK = (2*p+3)*C2_b_uni*(s2_r_pilot/f0_pilot)
386
+ N_b_l_pilot_IK = (2*p+3)*C2_b_uni*(s2_l_pilot/f0_pilot)
387
+ m3_pilot_IK = (invsym(cross(X_IK_med_q1, X_IK_med_q1))*cross(X_IK_med_q1, y_IK_med))[q+2,1]
388
+ D_b_pilot_IK = 2*(q-p)*(C1_b_uni*m3_pilot_IK)^2
389
+ h2_l_pilot_IK = (N_b_l_pilot_IK / (N_l*D_b_pilot_IK))^(1/(2*q+3))
390
+ h2_r_pilot_IK = (N_b_r_pilot_IK / (N_r*D_b_pilot_IK))^(1/(2*q+3))
391
+ * Data for derivative
392
+ X_lq_IK_h2=select(X_lq,X_l:>=c-h2_l_pilot_IK); Y_l_IK_h2 =select(Y_l,X_l:>=c-h2_l_pilot_IK)
393
+ X_rq_IK_h2=select(X_rq,X_r:<=c+h2_r_pilot_IK); Y_r_IK_h2 =select(Y_r,X_r:<=c+h2_r_pilot_IK)
394
+ m2_l_IK = (invsym(cross(X_lq_IK_h2, X_lq_IK_h2))*cross(X_lq_IK_h2, Y_l_IK_h2))[p+2,1]
395
+ m2_r_IK = (invsym(cross(X_rq_IK_h2, X_rq_IK_h2))*cross(X_rq_IK_h2, Y_r_IK_h2))[p+2,1]
396
+ D_h_IK = 2*(`p'+1-`deriv')*(C1_h*(m2_r_IK - (-1)^(`deriv'+`p'+1)*m2_l_IK))^2
397
+ N_h_IK = (2*`deriv'+1)*Vm0_pilot_IK
398
+ * Regularization
399
+ temp = rdbwselect_2014_regconst(`p1',1)
400
+ con = temp[`p2',`p2']
401
+ n_l_h2 = length(Y_l_IK_h2); n_r_h2 = length(Y_r_IK_h2)
402
+ r_l_h = (con*s2_l_pilot)/(n_l_h2*h2_l_pilot_IK^(2*`p1'))
403
+ r_r_h = (con*s2_r_pilot)/(n_r_h2*h2_r_pilot_IK^(2*`p1'))
404
+ R_h_IK = `scaleregul'*2*(`p'+1-`deriv')*C1_h^2*3*(r_l_h + r_r_h)
405
+ * Final bandwidth
406
+ h_IK = (N_h_IK / (`N'*(D_h_IK + R_h_IK)))^(1/(2*p+3))
407
+
408
+ *** DJMC (not documented)
409
+ D_h_DM = 2*(`p'+1-`deriv')*C1_h^2*(m2_r_IK^2 + m2_l_IK^2)
410
+ D_b_DM = 2*(`q'-`p')*C1_b^2*(m3_r_IK^2 + m3_l_IK^2)
411
+ h_DM = (N_h_IK / (`N'*D_h_DM))^(1/(2*`p'+3))
412
+ b_DM = (N_b_IK / (`N'*D_b_DM))^(1/(2*`q'+3))
413
+
414
+ st_numscalar("h_IK", h_IK)
415
+ st_numscalar("b_IK", b_IK)
416
+
417
+ if (`b_calc'==0) {
418
+ b_IK = h_IK/`rho'
419
+ }
420
+ st_numscalar("b_IK",b_IK)
421
+ }
422
+
423
+ *********************************************************************
424
+ ********************************** C-V *****************************
425
+ *********************************************************************
426
+ if ("`bwselect'"=="CV" | "`all'"~="") {
427
+
428
+ display("Computing CV bandwidth selector.")
429
+ v_CV_l = 0;w_CV_l = 0
430
+ minindex(X_l, `N_l', v_CV_l, w_CV_l)
431
+ x_sort_l = X_l[v_CV_l];y_sort_l = Y_l[v_CV_l]
432
+ v_CV_r = 0;w_CV_r = 0
433
+ maxindex(X_r, `N_r', v_CV_r, w_CV_r)
434
+ x_sort_r = X_r[v_CV_r];y_sort_r = Y_r[v_CV_r]
435
+ h_CV_min = 0
436
+ if (`N_r'>20 & `N_l'>20){
437
+ h_CV_min = min((abs(x_sort_r[`N_r']-x_sort_r[`N_r'-20]),abs(x_sort_l[`N_l']-x_sort_l[`N_l'-20])))
438
+ }
439
+ h_CV_max = min((abs(x_sort_r[1]-x_sort_r[`N_r']),abs(x_sort_l[1]-x_sort_l[`N_l'])))
440
+ h_CV_jump = min((abs(x_sort_r[1]-x_sort_r[`N_r'])/10,abs(x_sort_l[1]-x_sort_l[`N_l']))/10)
441
+ st_numscalar("h_CV_min", h_CV_min[1,1])
442
+ st_numscalar("h_CV_max", h_CV_max[1,1])
443
+ st_numscalar("h_CV_jump", h_CV_jump[1,1])
444
+ if ("`cvgrid_min'"=="0") {
445
+ cvgrid_min = h_CV_min
446
+ }
447
+ else if ("`cvgrid_min'"!="0") {
448
+ cvgrid_min = `cvgrid_min'
449
+ }
450
+ if ("`cvgrid_max'"=="0") {
451
+ cvgrid_max = h_CV_max
452
+ }
453
+ else if ("`cvgrid_max'"!="0") {
454
+ cvgrid_max = `cvgrid_max'
455
+ }
456
+ if ("`cvgrid_length'"=="0") {
457
+ cvgrid_length = abs(cvgrid_max-cvgrid_min)/20
458
+ }
459
+ else if ("`cvgrid_length'"!="0") {
460
+ cvgrid_length = `cvgrid_length'
461
+ }
462
+ if (cvgrid_min>=cvgrid_max){
463
+ cvgrid_min = 0
464
+ }
465
+ st_numscalar("cvgrid_min", cvgrid_min)
466
+ st_numscalar("cvgrid_max", cvgrid_max)
467
+ st_numscalar("cvgrid_length", cvgrid_length)
468
+ h_CV_seq = range(cvgrid_min, cvgrid_max, cvgrid_length)
469
+ s_CV = length(h_CV_seq)
470
+ CV_l = CV_r = J(1, s_CV, 0)
471
+ n_CV_l = round(`delta'*`N_l')-3
472
+ n_CV_r = round(`delta'*`N_r')-3
473
+
474
+ for (v=1; v<=s_CV; v++) {
475
+ for (k=0; k<=n_CV_l; k++) {
476
+ ind_l = `N_l'-k-1
477
+ x_CV_sort_l = x_sort_l[1::ind_l]
478
+ y_CV_sort_l = y_sort_l[1::ind_l]
479
+ w_CV_sort_l = rdbwselect_2014_kweight(x_CV_sort_l,x_sort_l[ind_l+1],h_CV_seq[v],"`kernel'")
480
+ x_CV_l = select(x_CV_sort_l,w_CV_sort_l:>0)
481
+ y_CV_l = select(y_CV_sort_l,w_CV_sort_l:>0)
482
+ w_CV_l = select(w_CV_sort_l,w_CV_sort_l:>0)
483
+ XX_CV_l = J(length(w_CV_l),`p1',.)
484
+ for (j=1; j<=`p1'; j++) {
485
+ XX_CV_l[.,j] = (x_CV_l :- x_sort_l[ind_l+1]):^(j-1)
486
+ }
487
+ y_CV_hat_l = (invsym(cross(XX_CV_l,w_CV_l,XX_CV_l))*cross(XX_CV_l,w_CV_l,y_CV_l))[1]
488
+ mse_CV_l = (y_sort_l[ind_l+1] - y_CV_hat_l)^2
489
+ CV_l[v] = CV_l[v] + mse_CV_l
490
+ }
491
+ for (k=0; k<=n_CV_r; k++) {
492
+ ind_r = `N_r'-k-1
493
+ x_CV_sort_r = x_sort_r[1::ind_r]
494
+ y_CV_sort_r = y_sort_r[1::ind_r]
495
+ w_CV_sort_r = rdbwselect_2014_kweight(x_CV_sort_r,x_sort_r[ind_r+1],h_CV_seq[v],"`kernel'")
496
+ x_CV_r = select(x_CV_sort_r,w_CV_sort_r:>0)
497
+ y_CV_r = select(y_CV_sort_r,w_CV_sort_r:>0)
498
+ w_CV_r = select(w_CV_sort_r,w_CV_sort_r:>0)
499
+ XX_CV_r = J(length(w_CV_r),`p1',.)
500
+
501
+ for (j=1; j<= `p1' ; j++) {
502
+ XX_CV_r[.,j] = (x_CV_r :- x_sort_r[ind_r+1]):^(j-1)
503
+ }
504
+
505
+ y_CV_hat_r = (invsym(cross(XX_CV_r,w_CV_r,XX_CV_r))*cross(XX_CV_r,w_CV_r,y_CV_r))[1]
506
+ mse_CV_r = (y_sort_r[ind_r+1] - y_CV_hat_r)^2
507
+ CV_r[v] = CV_r[v] + mse_CV_r
508
+ }
509
+ }
510
+
511
+ CV_sum = CV_l + CV_r
512
+ CV_sum_order = order(abs(CV_sum'),1)
513
+ h_CV = h_CV_seq[CV_sum_order]
514
+ h_CV = h_CV[1,1]
515
+
516
+ if (`b_calc'==0) {
517
+ b_CV = h_CV/`rho'
518
+ }
519
+ st_numscalar("h_CV", h_CV)
520
+ st_numscalar("b_CV", b_CV)
521
+ }
522
+ }
523
+
524
+ *******************************************************************************
525
+
526
+ disp ""
527
+ disp in smcl in gr "Bandwidth estimators for RD local polynomial regression"
528
+ disp ""
529
+ disp ""
530
+ disp in smcl in gr "{ralign 21: Cutoff c = `c'}" _col(22) " {c |} " _col(23) in gr "Left of " in yellow "c" _col(36) in gr "Right of " in yellow "c" _col(61) in gr "Number of obs = " in yellow %10.0f `N_l'+`N_r'
531
+ disp in smcl in gr "{hline 22}{c +}{hline 22}" _col(61) in gr "NN matches = " in yellow %10.0f `matches'
532
+ disp in smcl in gr "{ralign 21:Number of obs}" _col(22) " {c |} " _col(23) as result %9.0f `N_l' _col(37) %9.0f `N_r' _col(61) in gr "Kernel type = " in yellow "{ralign 10:`kernel_type'}"
533
+ if ("`all'"=="" & "`bwselect'"!="CV") {
534
+ disp in smcl in gr "{ralign 21:Order loc. poly. (p)}" _col(22) " {c |} " _col(23) as result %9.0f `p' _col(37) %9.0f `p'
535
+ disp in smcl in gr "{ralign 21:Order bias (q)}" _col(22) " {c |} " _col(23) as result %9.0f `q' _col(37) %9.0f `q'
536
+ disp in smcl in gr "{ralign 21:Range of `x'}" _col(22) " {c |} " _col(23) as result %9.3f `range_l' _col(37) %9.3f `range_r'
537
+ }
538
+ if ("`bwselect'"=="CV" | "`all'"!="") {
539
+ disp in smcl in gr "{ralign 21:Order loc. poly. (p)}" _col(22) " {c |} " _col(23) as result %9.0f `p' _col(37) %9.0f `p' _col(61) in gr "Min BW grid = " in yellow %10.5f cvgrid_min
540
+ disp in smcl in gr "{ralign 21:Order bias (q)}" _col(22) " {c |} " _col(23) as result %9.0f `q' _col(37) %9.0f `q' _col(61) in gr "Max BW grid = " in yellow %10.5f cvgrid_max
541
+ disp in smcl in gr "{ralign 21:Range of `x'}" _col(22) " {c |} " _col(23) as result %9.3f `range_l' _col(37) %9.3f `range_r' _col(61) in gr "Length BW grid = " in yellow %10.5f cvgrid_length
542
+ }
543
+
544
+ disp ""
545
+ disp in smcl in gr "{hline 10}{c TT}{hline 35}"
546
+ disp in smcl in gr "{ralign 9:Method}" _col(10) " {c |} " _col(18) "h" _col(30) "b" _col(41) "rho" _n "{hline 10}{c +}{hline 35}"
547
+ if ("`bwselect'"=="IK") {
548
+ disp in smcl in gr "{ralign 9:IK }" _col(10) " {c |} " _col(11) in ye %9.0g h_IK _col(25) in ye %9.0g b_IK _col(38) in ye %9.0g h_IK/b_IK
549
+ }
550
+ if ("`bwselect'"=="CV") {
551
+ disp in smcl in gr "{ralign 9:CV }" _col(10) " {c |} " _col(11) in ye %9.0g h_CV _col(30) in ye "NA" _col(42) in ye %9.0g "NA"
552
+ }
553
+ if ("`all'"~="") {
554
+ disp in smcl in gr "{ralign 9:CCT}" _col(10) " {c |} " _col(11) in ye %9.0g h_CCT _col(25) in ye %9.0g b_CCT _col(38) in ye %9.0g h_CCT/b_CCT
555
+ disp in smcl in gr "{ralign 9:IK}" _col(10) " {c |} " _col(11) in ye %9.0g h_IK _col(25) in ye %9.0g b_IK _col(38) in ye %9.0g h_IK/b_IK
556
+ disp in smcl in gr "{ralign 9:CV}" _col(10) " {c |} " _col(11) in ye %9.0g h_CV _col(30) in ye "NA" _col(42) in ye "NA"
557
+ }
558
+ if ("`bwselect'"=="" & "`all'"=="") | ("`bwselect'"=="CCT" & "`all'"=="") {
559
+ disp in smcl in gr "{ralign 9:CCT}" _col(10) " {c |} " _col(11) in ye %9.0g h_CCT _col(25) in ye %9.0g b_CCT _col(38) in ye %9.0g h_CCT/b_CCT
560
+ }
561
+ disp in smcl in gr "{hline 10}{c BT}{hline 35}"
562
+
563
+ if ("`bwselect'"=="CV" & "`cvplot'"!="" | "`all'"!="" & "`cvplot'"!="") {
564
+ local h_CV= h_CV
565
+ mata rdbwselect_2014_cvplot(CV_sum', h_CV_seq, "xtitle(Grid of bandwidth (h)) ytitle(Cross-Validation objective function) c(l) ylabel(none) xline(`h_CV') title(Cross-Validation objective function)")
566
+ }
567
+
568
+ restore
569
+ ereturn clear
570
+ ereturn scalar N_l = `N_l'
571
+ ereturn scalar N_r = `N_r'
572
+ ereturn scalar c = `c'
573
+ ereturn scalar p = `p'
574
+ ereturn scalar q = `q'
575
+
576
+ if ("`bwselect'"=="CCT" | "`bwselect'"=="" | "`all'"~="") {
577
+ ereturn scalar h_CCT = h_CCT
578
+ ereturn scalar b_CCT = b_CCT
579
+ *ereturn scalar q_CCT = q_CCT
580
+ }
581
+ if ("`bwselect'"=="IK" | "`all'"~="") {
582
+ ereturn scalar h_IK = h_IK
583
+ ereturn scalar b_IK = b_IK
584
+ *ereturn scalar h_djmc = `h_DJMC'
585
+ *ereturn scalar b_djmc = `b_DJMC'
586
+ }
587
+ if ("`bwselect'"=="CV" | "`all'"~="") {
588
+ ereturn scalar h_CV = h_CV
589
+ *ereturn scalar b_CV = b_cv
590
+ }
591
+
592
+ mata mata clear
593
+
594
+ end
595
+
596
+
30/replication_package/Adofiles/rd_2021/rdbwselect_2014.sthlp ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *! version 6.0 2014-10-14}{...}
3
+ {viewerjumpto "Syntax" "rdbwselect_2014##syntax"}{...}
4
+ {viewerjumpto "Description" "rdbwselect_2014##description"}{...}
5
+ {viewerjumpto "Options" "rdbwselect_2014##options"}{...}
6
+ {viewerjumpto "Examples" "rdbwselect_2014##examples"}{...}
7
+ {viewerjumpto "Saved results" "rdbwselect_2014##saved_results"}{...}
8
+
9
+ {title:Title}
10
+
11
+ {p 4 8}{cmd:rdbwselect_2014} {hline 2} Deprecated Bandwidth Selection Procedures for Local-Polynomial Regression-Discontinuity Estimators.{p_end}
12
+
13
+ {p 4 8}{ul:Important}: this command is no longer supported or updated, and it is made available only for backward compatibility purposes. Please use {help rdbwselect:rdbwselect} instead.{p_end}
14
+
15
+
16
+ {marker syntax}{...}
17
+ {title:Syntax}
18
+
19
+ {p 4 8}{cmd:rdbwselect_2014} {it:depvar} {it:indepvar} {ifin}
20
+ [{cmd:,}
21
+ {cmd:c(}{it:#}{cmd:)}
22
+ {cmd:p(}{it:#}{cmd:)}
23
+ {cmd:q(}{it:#}{cmd:)}
24
+ {cmd:deriv(}{it:#}{cmd:)}
25
+ {cmd:rho(}{it:#}{cmd:)}
26
+ {cmd:kernel(}{it:kernelfn}{cmd:)}
27
+ {cmd:bwselect(}{it:bwmethod}{cmd:)}
28
+ {cmd:scaleregul(}{it:#}{cmd:)}
29
+ {cmd:delta(}{it:#}{cmd:)}
30
+ {cmd:cvgrid_min(}{it:#}{cmd:)}
31
+ {cmd:cvgrid_max(}{it:#}{cmd:)}
32
+ {cmd:cvgrid_length(}{it:#}{cmd:)}
33
+ {cmd:cvplot}
34
+ {cmd:vce(}{it:vcemethod}{cmd:)}
35
+ {cmd:matches(}{it:#}{cmd:)}
36
+ {cmd:all}
37
+ ]{p_end}
38
+
39
+ {synoptset 28 tabbed}{...}
40
+
41
+ {marker description}{...}
42
+ {title:Description}
43
+
44
+ {p 4 8}{cmd:rdbwselect_2014} is a deprecated command implementing three bandwidth selectors for local polynomial Regression Discontinuity (RD) point estimators and inference procedures, as described in
45
+ {browse "https://sites.google.com/site/rdpackages/rdrobust/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Calonico, Cattaneo and Titiunik (2014)}.
46
+ This command is no longer supported or updated, and it is made available only for backward compatibility purposes.{p_end}
47
+ {p 8 8}This command uses compiled MATA functions given in
48
+ {it:rdbwselect_2014_functions.do}.{p_end}
49
+
50
+ {p 4 8}The latest version of the {cmd:rdrobust} package includes the following commands:{p_end}
51
+ {p 8 8}{help rdrobust:rdrobust} for point estimation and inference procedures.{p_end}
52
+ {p 8 8}{help rdbwselect:rdbwselect} for data-driven bandwidth selection.{p_end}
53
+ {p 8 8}{help rdplot:rdplot} for data-driven RD plots.{p_end}
54
+
55
+ {p 4 8}For more details, and related Stata and R packages useful for analysis of RD designs, visit:
56
+ {browse "https://sites.google.com/site/rdpackages/"}{p_end}
57
+
58
+
59
+ {marker options}{...}
60
+ {title:Options}
61
+
62
+ {p 4 8}{cmd:c(}{it:#}{cmd:)} specifies the RD cutoff in {it:indepvar}.
63
+ Default is {cmd:c(0)}.
64
+
65
+ {p 4 8}{cmd:p(}{it:#}{cmd:)} specifies the order of the local-polynomial used to construct the point estimator.
66
+ Default is {cmd:p(1)} (local linear regression).
67
+
68
+ {p 4 8}{cmd:q(}{it:#}{cmd:)} specifies the order of the local-polynomial used to construct the bias-correction.
69
+ Default is {cmd:q(2)} (local quadratic regression).
70
+
71
+ {p 4 8}{cmd:deriv(}{it:#}{cmd:)} specifies the order of the derivative of the regression functions to be estimated.
72
+ Default is {cmd:deriv(0)} (Sharp RD, or Fuzzy RD if {cmd:fuzzy(.)} is also specified). Setting {cmd:deriv(1)} results in estimation of a Kink RD design (up to scale), or Fuzzy Kink RD if {cmd:fuzzy(.)} is also specified.
73
+
74
+ {p 4 8}{cmd:rho(}{it:#}{cmd:)} if specified, sets the pilot bandwidth {it:b} equal to {it:h}/{it:rho}, where {it:h} is computed using the method and options chosen below.
75
+
76
+ {p 4 8}{cmd:kernel(}{it:kernelfn}{cmd:)} specifies the kernel function used to construct the local-polynomial estimator(s). Options are: {opt tri:angular}, {opt epa:nechnikov}, and {opt uni:form}.
77
+ Default is {opt triangular}.
78
+
79
+ {p 4 8}{cmd:bwselect(}{it:bwmethod}{cmd:)} specifies the bandwidth selection procedure to be used. By default it computes both {it:h} and {it:b}, unless {it:rho} is specified, in which case it only computes {it:h} and sets {it:b}={it:h}/{it:rho}.
80
+ Options are:{p_end}
81
+ {p 8 12}{opt CCT} for bandwidth selector proposed by Calonico, Cattaneo and Titiunik (2014). This is the default option.{p_end}
82
+ {p 8 12}{opt IK} for bandwidth selector proposed by Imbens and Kalyanaraman (2012) (only available for Sharp RD design).{p_end}
83
+ {p 8 12}{opt CV} for cross-validation method proposed by Ludwig and Miller (2007) (only available for Sharp RD design).{p_end}
84
+
85
+ {p 4 8}{cmd:scaleregul(}{it:#}{cmd:)} specifies scaling factor for the regularization terms of {opt CCT} and {opt IK} bandwidth selectors. Setting {cmd:scaleregul(0)} removes the regularization term from the bandwidth selectors.
86
+ Default is {cmd:scaleregul(1)}.
87
+
88
+ {p 4 8}{cmd:delta(}{it:#}{cmd:)} specifies the quantile that defines the sample used in the cross-validation procedure. This option is used only if {cmd:bwselect(}{opt CV}{cmd:)} is specified.
89
+ Default is {cmd:delta(0.5)}, that is, the median of the control and treated subsamples.
90
+
91
+ {p 4 8}{cmd:cvgrid_min(}{it:#}{cmd:)} specifies the minimum value of the bandwidth grid used in the cross-validation procedure. This option is used only if {cmd:bwselect(}{opt CV}{cmd:)} is specified.
92
+
93
+ {p 4 8}{cmd:cvgrid_max(}{it:#}{cmd:)} specifies the maximum value of the bandwidth grid used in the cross-validation procedure. This option is used only if {cmd:bwselect(}{opt CV}{cmd:)} is specified.
94
+
95
+ {p 4 8}{cmd:cvgrid_length(}{it:#}{cmd:)} specifies the bin length of the (evenly-spaced) bandwidth grid used in the cross-validation procedure. This option is used only if {cmd:bwselect(}{opt CV}{cmd:)} is specified.
96
+
97
+ {p 4 8}{cmd:cvplot} if specified, {cmd:rdbwselect_2014} also reports a graph of the CV objective function. This option is used only if {cmd:bwselect(}{opt CV}{cmd:)} is specified.
98
+
99
+ {p 4 8}{cmd:vce(}{it:vcemethod}{cmd:)} specifies the procedure used to compute the variance-covariance matrix estimator. This option is used only if {opt CCT} or {opt IK} bandwidth procedures are used.
100
+ Options are:{p_end}
101
+ {p 8 12}{opt nn} for nearest-neighbor matches residuals using {cmd:matches(}{it:#}{cmd:)} matches. This is the default option (with {cmd:matches(3)}, see below).{p_end}
102
+ {p 8 12}{opt resid} for estimated plug-in residuals using {it:h} bandwidth.{p_end}
103
+
104
+ {p 4 8}{cmd:matches(}{it:#}{cmd:)} specifies the number of matches in the nearest-neighbor based variance-covariance matrix estimator. This option is used only when nearest-neighbor matches residuals are employed.
105
+ Default is {cmd:matches(3)}.
106
+
107
+ {p 4 8}{cmd:all} if specified, {cmd:rdbwselect_2014} reports three different procedures:{p_end}
108
+ {p 8 12}{opt CCT} for bandwidth selector proposed by Calonico, Cattaneo and Titiunik (2014).{p_end}
109
+ {p 8 12}{opt IK} for bandwidth selector proposed by Imbens and Kalyanaraman (2012).{p_end}
110
+ {p 8 12}{opt CV} for cross-validation method proposed by Ludwig and Miller (2007).{p_end}
111
+
112
+ {hline}
113
+
114
+
115
+ {title:References}
116
+
117
+ {p 4 8}Calonico, S., Cattaneo, M. D., and R. Titiunik. 2014. Robust Data-Driven Inference in the Regression-Discontinuity Design. {it:Stata Journal} 14(4): 909-946.
118
+ {browse "https://sites.google.com/site/rdpackages/rdrobust/Calonico-Cattaneo-Titiunik_2014_Stata.pdf"}.
119
+
120
+
121
+ {title:Authors}
122
+
123
+ {p 4 8}Sebastian Calonico, Columbia University, New York, NY.
124
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
125
+
126
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
127
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
128
+
129
+ {p 4 8}Max H. Farrell, University of Chicago, Chicago, IL.
130
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
131
+
132
+ {p 4 8}Rocio Titiunik, Princeton University, Princeton, NJ.
133
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
134
+
135
+
30/replication_package/Adofiles/rd_2021/rdbwselect_2014_cvplot.mo ADDED
Binary file (7.02 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdbwselect_2014_kconst.ado ADDED
@@ -0,0 +1,885 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *!version 6.0 2014-10-14
2
+
3
+ capture program drop rdbwselect_2014_kconst
4
+ program define rdbwselect_2014_kconst, eclass
5
+ syntax anything
6
+
7
+ tokenize "`anything'"
8
+ local p1 `1'
9
+ local p2 `2'
10
+ local kid `3'
11
+
12
+
13
+ if (`kid'==1) {
14
+ if (`p1'==0){
15
+ if (`p2'==0) {
16
+ local C1=0.333333333333333
17
+ local C2=1.33333333333333
18
+ }
19
+ }
20
+ if (`p1'==1){
21
+ if (`p2'==0) {
22
+ local C1=-0.1
23
+ local C2=4.8
24
+ }
25
+ if (`p2'==1) {
26
+ local C1=0.8
27
+ local C2=19.2
28
+ }
29
+ }
30
+ if (`p1'==2){
31
+ if (`p2'==0) {
32
+ local C1=0.0285714285714287
33
+ local C2=10.2857142857143
34
+ }
35
+ if (`p2'==1) {
36
+ local C1=-0.428571428571427
37
+ local C2=274.285714285714
38
+ }
39
+ if (`p2'==2) {
40
+ local C1=1.28571428571428
41
+ local C2=308.571428571429
42
+ }
43
+ }
44
+ if (`p1'==3){
45
+ if (`p2'==0) {
46
+ local C1=-0.00793650793649814
47
+ local C2=17.7777777777783
48
+ }
49
+ if (`p2'==1) {
50
+ local C1=0.190476190476176
51
+ local C2=1600.00000000012
52
+ }
53
+ if (`p2'==2) {
54
+ local C1=-1.00000000000003
55
+ local C2=10080.0000000008
56
+ }
57
+ if (`p2'==3) {
58
+ local C1=1.77777777777777
59
+ local C2=4977.77777777821
60
+ }
61
+ }
62
+ if (`p1'==4){
63
+ if (`p2'==0) {
64
+ local C1=0.00216450216449893
65
+ local C2=27.2727272727347
66
+ }
67
+ if (`p2'==1) {
68
+ local C1=-0.0757575757576774
69
+ local C2=6109.0909090921
70
+ }
71
+ if (`p2'==2) {
72
+ local C1=0.606060606059373
73
+ local C2=115461.818181807
74
+ }
75
+ if (`p2'==3) {
76
+ local C1=-1.81818181818016
77
+ local C2=293236.363636312
78
+ }
79
+ if (`p2'==4) {
80
+ local C1=2.27272727272634
81
+ local C2=80181.8181817951
82
+ }
83
+ }
84
+ if (`p1'==5){
85
+ if (`p2'==0) {
86
+ local C1=-0.000582750584072755
87
+ local C2=38.7692307691143
88
+ }
89
+ if (`p2'==1) {
90
+ local C1=0.0279720279806952
91
+ local C2=18092.3076921598
92
+ }
93
+ if (`p2'==2) {
94
+ local C1=-0.314685314702729
95
+ local C2=781587.692308313
96
+ }
97
+ if (`p2'==3) {
98
+ local C1=1.39860139862503
99
+ local C2=5582769.23070759
100
+ }
101
+ if (`p2'==4) {
102
+ local C1=-2.88461538463889
103
+ local C2=7463076.92303863
104
+ }
105
+ if (`p2'==5) {
106
+ local C1=2.76923076923777
107
+ local C2=1289619.69231728
108
+ }
109
+ }
110
+ if (`p1'==6){
111
+ if (`p2'==0) {
112
+ local C1=0.000155400158703856
113
+ local C2=52.2666666813375
114
+ }
115
+ if (`p2'==1) {
116
+ local C1=-0.00979020967770339
117
+ local C2=45158.4000232623
118
+ }
119
+ if (`p2'==2) {
120
+ local C1=0.146853146718058
121
+ local C2=3810240.00305634
122
+ }
123
+ if (`p2'==3) {
124
+ local C1=-0.897435898077674
125
+ local C2=59136000.0516687
126
+ }
127
+ if (`p2'==4) {
128
+ local C1=2.69230769423302
129
+ local C2=213444000.281064
130
+ }
131
+ if (`p2'==5) {
132
+ local C1=-4.20000000228174
133
+ local C2=174356582.583545
134
+ }
135
+ if (`p2'==6) {
136
+ local C1=3.266666667565
137
+ local C2=20718297.6275418
138
+ }
139
+ }
140
+ if (`p1'==7){
141
+ if (`p2'==0) {
142
+ local C1=-4.11365348327308e-05
143
+ local C2=67.7647062980167
144
+ }
145
+ if (`p2'==1) {
146
+ local C1=0.00329083788165008
147
+ local C2=99614.1186678147
148
+ }
149
+ if (`p2'==2) {
150
+ local C1=-0.0633484429563396
151
+ local C2=14792696.6380086
152
+ }
153
+ if (`p2'==3) {
154
+ local C1=0.506787375546992
155
+ local C2=430475298.36917
156
+ }
157
+ if (`p2'==4) {
158
+ local C1=-2.05882356315851
159
+ local C2=3264437686.45156
160
+ }
161
+ if (`p2'==5) {
162
+ local C1=4.61176469177008
163
+ local C2=6999904063.30826
164
+ }
165
+ if (`p2'==6) {
166
+ local C1=-5.76470586378127
167
+ local C2=3838978717.71456
168
+ }
169
+ if (`p2'==7) {
170
+ local C1=3.76470587169752
171
+ local C2=332572914.642903
172
+ }
173
+ }
174
+ if (`p1'==8){
175
+ if (`p2'==0) {
176
+ local C1=1.07820976609219e-05
177
+ local C2=85.2631303268003
178
+ }
179
+ if (`p2'==1) {
180
+ local C1=-0.0010711795912357
181
+ local C2=200084.040818939
182
+ }
183
+ if (`p2'==2) {
184
+ local C1=0.0257188626565039
185
+ local C2=48530367.293081
186
+ }
187
+ if (`p2'==3) {
188
+ local C1=-0.260059123858809
189
+ local C2=2403408146.73831
190
+ }
191
+ if (`p2'==4) {
192
+ local C1=1.36532014608383
193
+ local C2=33224189297.6117
194
+ }
195
+ if (`p2'==5) {
196
+ local C1=-4.09596675634384
197
+ local C2=146138075172.791
198
+ }
199
+ if (`p2'==6) {
200
+ local C1=7.2817234992981
201
+ local C2=206092142381.58
202
+ }
203
+ if (`p2'==7) {
204
+ local C1=-7.57894077897072
205
+ local C2=80937582484.7007
206
+ }
207
+ if (`p2'==8) {
208
+ local C1=4.2631561756134
209
+ local C2=5335240470.92272
210
+ }
211
+ }
212
+ if (`p1'==9){
213
+ if (`p2'==0) {
214
+ local C1=-4.34125468018465e-06
215
+ local C2=104.759683619607
216
+ }
217
+ if (`p2'==1) {
218
+ local C1=0.000359364319592714
219
+ local C2=373351.594641908
220
+ }
221
+ if (`p2'==2) {
222
+ local C1=-0.0100117437541485
223
+ local C2=139779902.485658
224
+ }
225
+ if (`p2'==3) {
226
+ local C1=0.123909175395966
227
+ local C2=10992402108.5829
228
+ }
229
+ if (`p2'==4) {
230
+ local C1=-0.812669515609741
231
+ local C2=252479011669.66
232
+ }
233
+ if (`p2'==5) {
234
+ local C1=3.12047624588013
235
+ local C2=1983097707753.03
236
+ }
237
+ if (`p2'==6) {
238
+ local C1=-7.36788511276245
239
+ local C2=5634749358454.49
240
+ }
241
+ if (`p2'==7) {
242
+ local C1=10.8265118598938
243
+ local C2=5601398362056.87
244
+ }
245
+ if (`p2'==8) {
246
+ local C1=-9.64254522323608
247
+ local C2=1650683628680.73
248
+ }
249
+ if (`p2'==9) {
250
+ local C1=4.76183295249939
251
+ local C2=85540403080.8251
252
+ }
253
+ }
254
+ if (`p1'==10){
255
+ if (`p2'==0) {
256
+ local C1=-5.92561264056712e-07
257
+ local C2=126.173458124805
258
+ }
259
+ if (`p2'==1) {
260
+ local C1=2.24271789193153e-05
261
+ local C2=655457.487684009
262
+ }
263
+ if (`p2'==2) {
264
+ local C1=0.00358942896127701
265
+ local C2=362125556.143413
266
+ }
267
+ if (`p2'==3) {
268
+ local C1=-0.057509183883667
269
+ local C2=42895674678.736
270
+ }
271
+ if (`p2'==4) {
272
+ local C1=0.448391914367676
273
+ local C2=1532214531495.65
274
+ }
275
+ if (`p2'==5) {
276
+ local C1=-2.11321067810059
277
+ local C2=19636178554116.5
278
+ }
279
+ if (`p2'==6) {
280
+ local C1=6.33000183105469
281
+ local C2=98056668493842
282
+ }
283
+ if (`p2'==7) {
284
+ local C1=-12.2766418457031
285
+ local C2=193660165481737
286
+ }
287
+ if (`p2'==8) {
288
+ local C1=15.3532409667969
289
+ local C2=142664371644737
290
+ }
291
+ if (`p2'==9) {
292
+ local C1=-11.9469718933105
293
+ local C2=32680235535116.8
294
+ }
295
+ if (`p2'==10) {
296
+ local C1=5.25889778137207
297
+ local C2=1366106680644.88
298
+ }
299
+ }
300
+ }
301
+ if (`kid'==2) {
302
+ if (`p1'==0){
303
+ if (`p2'==0) {
304
+ local C1=0.5
305
+ local C2=1
306
+ }
307
+ }
308
+ if (`p1'==1){
309
+ if (`p2'==0) {
310
+ local C1=-0.166666666666666
311
+ local C2=4
312
+ }
313
+ if (`p2'==1) {
314
+ local C1=0.999999999999999
315
+ local C2=12
316
+ }
317
+ }
318
+ if (`p1'==2){
319
+ if (`p2'==0) {
320
+ local C1=0.0499999999999927
321
+ local C2=8.99999999999989
322
+ }
323
+ if (`p2'==1) {
324
+ local C1=-0.599999999999969
325
+ local C2=191.999999999998
326
+ }
327
+ if (`p2'==2) {
328
+ local C1=1.49999999999998
329
+ local C2=179.999999999997
330
+ }
331
+ }
332
+ if (`p1'==3){
333
+ if (`p2'==0) {
334
+ local C1=-0.0142857142856023
335
+ local C2=15.9999999999967
336
+ }
337
+ if (`p2'==1) {
338
+ local C1=0.285714285713908
339
+ local C2=1199.99999999958
340
+ }
341
+ if (`p2'==2) {
342
+ local C1=-1.28571428571377
343
+ local C2=6479.99999999822
344
+ }
345
+ if (`p2'==3) {
346
+ local C1=1.99999999999972
347
+ local C2=2799.99999999933
348
+ }
349
+ }
350
+ if (`p1'==4){
351
+ if (`p2'==0) {
352
+ local C1=0.00396825396776279
353
+ local C2=24.999999999904
354
+ }
355
+ if (`p2'==1) {
356
+ local C1=-0.119047619048388
357
+ local C2=4799.99999999096
358
+ }
359
+ if (`p2'==2) {
360
+ local C1=0.83333333333394
361
+ local C2=79379.9999999445
362
+ }
363
+ if (`p2'==3) {
364
+ local C1=-2.22222222222626
365
+ local C2=179199.999999687
366
+ }
367
+ if (`p2'==4) {
368
+ local C1=2.50000000000273
369
+ local C2=44099.9999998837
370
+ }
371
+ }
372
+ if (`p1'==5){
373
+ if (`p2'==0) {
374
+ local C1=-0.00108225108795068
375
+ local C2=36.0000000009544
376
+ }
377
+ if (`p2'==1) {
378
+ local C1=0.0454545455922926
379
+ local C2=14700.0000007333
380
+ }
381
+ if (`p2'==2) {
382
+ local C1=-0.454545454842446
383
+ local C2=564480.000082575
384
+ }
385
+ if (`p2'==3) {
386
+ local C1=1.81818181864219
387
+ local C2=3628800.00023153
388
+ }
389
+ if (`p2'==4) {
390
+ local C1=-3.40909090952482
391
+ local C2=4410000.00041152
392
+ }
393
+ if (`p2'==5) {
394
+ local C1=3.00000000013097
395
+ local C2=698544.000095851
396
+ }
397
+ }
398
+ if (`p1'==6){
399
+ if (`p2'==0) {
400
+ local C1=0.000291375558163054
401
+ local C2=49.0000000807229
402
+ }
403
+ if (`p2'==1) {
404
+ local C1=-0.0163170211235411
405
+ local C2=37632.0000808351
406
+ }
407
+ if (`p2'==2) {
408
+ local C1=0.2202797360369
409
+ local C2=2857680.00223594
410
+ }
411
+ if (`p2'==3) {
412
+ local C1=-1.22377624921501
413
+ local C2=40320000.092693
414
+ }
415
+ if (`p2'==4) {
416
+ local C1=3.36538464389741
417
+ local C2=133402500.284745
418
+ }
419
+ if (`p2'==5) {
420
+ local C1=-4.84615386696532
421
+ local C2=100590336.250403
422
+ }
423
+ if (`p2'==6) {
424
+ local C1=3.5000000068685
425
+ local C2=11099088.0082191
426
+ }
427
+ }
428
+ if (`p1'==7){
429
+ if (`p2'==0) {
430
+ local C1=-7.77092654971057e-05
431
+ local C2=64.0000038038176
432
+ }
433
+ if (`p2'==1) {
434
+ local C1=0.0055944790947251
435
+ local C2=84672.0058301801
436
+ }
437
+ if (`p2'==2) {
438
+ local C1=-0.0979024390690029
439
+ local C2=11430721.0463556
440
+ }
441
+ if (`p2'==3) {
442
+ local C1=0.717949407175183
443
+ local C2=304920057.289328
444
+ }
445
+ if (`p2'==4) {
446
+ local C1=-2.69230864942074
447
+ local C2=2134440363.92093
448
+ }
449
+ if (`p2'==5) {
450
+ local C1=5.60000113397837
451
+ local C2=4249942482.73742
452
+ }
453
+ if (`p2'==6) {
454
+ local C1=-6.53333409875631
455
+ local C2=2175421444.25063
456
+ }
457
+ if (`p2'==7) {
458
+ local C1=4.00000020302832
459
+ local C2=176679380.680557
460
+ }
461
+ }
462
+ if (`p1'==8){
463
+ if (`p2'==0) {
464
+ local C1=2.18183413380757e-05
465
+ local C2=81.0000794511867
466
+ }
467
+ if (`p2'==1) {
468
+ local C1=-0.00185953138861805
469
+ local C2=172800.541412919
470
+ }
471
+ if (`p2'==2) {
472
+ local C1=0.0407402217388153
473
+ local C2=38420009.7178256
474
+ }
475
+ if (`p2'==3) {
476
+ local C1=-0.380106568336487
477
+ local C2=1756346185.84028
478
+ }
479
+ if (`p2'==4) {
480
+ local C1=1.85295575857162
481
+ local C2=22545053348.8166
482
+ }
483
+ if (`p2'==5) {
484
+ local C1=-5.1882472038269
485
+ local C2=92554313087.018
486
+ }
487
+ if (`p2'==6) {
488
+ local C1=8.64706671237946
489
+ local C2=122367438459.692
490
+ }
491
+ if (`p2'==7) {
492
+ local C1=-8.47059142589569
493
+ local C2=45230142568.556
494
+ }
495
+ if (`p2'==8) {
496
+ local C1=4.50000092387199
497
+ local C2=2815835708.811
498
+ }
499
+ }
500
+ if (`p1'==9){
501
+ if (`p2'==0) {
502
+ local C1=4.68557118438184e-06
503
+ local C2=100.001152185784
504
+ }
505
+ if (`p2'==1) {
506
+ local C1=0.000529960263520479
507
+ local C2=326698.901377387
508
+ }
509
+ if (`p2'==2) {
510
+ local C1=-0.0157094746828079
511
+ local C2=112913004.163799
512
+ }
513
+ if (`p2'==3) {
514
+ local C1=0.18475353717804
515
+ local C2=8245154393.28242
516
+ }
517
+ if (`p2'==4) {
518
+ local C1=-1.13636112213135
519
+ local C2=176755900163.269
520
+ }
521
+ if (`p2'==5) {
522
+ local C1=4.09473609924316
523
+ local C2=1301601790390.11
524
+ }
525
+ if (`p2'==6) {
526
+ local C1=-9.10118865966797
527
+ local C2=3480802183163.29
528
+ }
529
+ if (`p2'==7) {
530
+ local C1=12.6308746337891
531
+ local C2=3267931174533.81
532
+ }
533
+ if (`p2'==8) {
534
+ local C1=-10.6575126647949
535
+ local C2=912326437703.479
536
+ }
537
+ if (`p2'==9) {
538
+ local C1=4.99992036819458
539
+ local C2=44916975840.9114
540
+ }
541
+ }
542
+ if (`p1'==10){
543
+ if (`p2'==0) {
544
+ local C1=0.000199975620489568
545
+ local C2=120.969780523265
546
+ }
547
+ if (`p2'==1) {
548
+ local C1=-0.0060248076915741
549
+ local C2=580633.742406933
550
+ }
551
+ if (`p2'==2) {
552
+ local C1=0.0271593332290649
553
+ local C2=298292189.248646
554
+ }
555
+ if (`p2'==3) {
556
+ local C1=-0.118029594421387
557
+ local C2=32946394271.3983
558
+ }
559
+ if (`p2'==4) {
560
+ local C1=0.678001403808594
561
+ local C2=1103315909981.8
562
+ }
563
+ if (`p2'==5) {
564
+ local C1=-2.8924560546875
565
+ local C2=13331592042473
566
+ }
567
+ if (`p2'==6) {
568
+ local C1=8.1068115234375
569
+ local C2=62878798430702.7
570
+ }
571
+ if (`p2'==7) {
572
+ local C1=-14.8649291992188
573
+ local C2=117374454020910
574
+ }
575
+ if (`p2'==8) {
576
+ local C1=17.6541748046875
577
+ local C2=82487887927301
578
+ }
579
+ if (`p2'==9) {
580
+ local C1=-13.0838928222656
581
+ local C2=17947030260316.1
582
+ }
583
+ if (`p2'==10) {
584
+ local C1=5.49765396118164
585
+ local C2=715867764128.387
586
+ }
587
+ }
588
+ }
589
+ if (`kid'==3) {
590
+ if (`p1'==0){
591
+ if (`p2'==0) {
592
+ local C1=0.375
593
+ local C2=1.2
594
+ }
595
+ }
596
+ if (`p1'==1){
597
+ if (`p2'==0) {
598
+ local C1=-0.115789473684211
599
+ local C2=4.49798179659677
600
+ }
601
+ if (`p2'==1) {
602
+ local C1=0.842105263157895
603
+ local C2=16.7154728927583
604
+ }
605
+ }
606
+ if (`p1'==2){
607
+ if (`p2'==0) {
608
+ local C1=0.033482142857143
609
+ local C2=9.81646825396846
610
+ }
611
+ if (`p2'==1) {
612
+ local C1=-0.464285714285726
613
+ local C2=246.349206349214
614
+ }
615
+ if (`p2'==2) {
616
+ local C1=1.32812500000002
617
+ local C2=266.631944444454
618
+ }
619
+ }
620
+ if (`p1'==3){
621
+ if (`p2'==0) {
622
+ local C1=-0.00936222792511199
623
+ local C2=17.1423583607642
624
+ }
625
+ if (`p2'==1) {
626
+ local C1=0.2102461743182
627
+ local C2=1465.2713806652
628
+ }
629
+ if (`p2'==2) {
630
+ local C1=-1.05655355954787
631
+ local C2=8911.29621722357
632
+ }
633
+ if (`p2'==3) {
634
+ local C1=1.82035928143731
635
+ local C2=4288.56473226901
636
+ }
637
+ }
638
+ if (`p1'==4){
639
+ if (`p2'==0) {
640
+ local C1=0.00256405887983036
641
+ local C2=26.471726419711
642
+ }
643
+ if (`p2'==1) {
644
+ local C1=-0.0847303620927278
645
+ local C2=5670.24522674757
646
+ }
647
+ if (`p2'==2) {
648
+ local C1=0.651151696880333
649
+ local C2=103766.558129494
650
+ }
651
+ if (`p2'==3) {
652
+ local C1=-1.89587024669527
653
+ local C2=257166.288749527
654
+ }
655
+ if (`p2'==4) {
656
+ local C1=2.31540479760156
657
+ local C2=68979.7265596946
658
+ }
659
+ }
660
+ if (`p1'==5){
661
+ if (`p2'==0) {
662
+ local C1=-0.000692327266378356
663
+ local C2=37.8030101065867
664
+ }
665
+ if (`p2'==1) {
666
+ local C1=0.0315930338813359
667
+ local C2=16958.7194854538
668
+ }
669
+ if (`p2'==2) {
670
+ local C1=-0.342512696563062
671
+ local C2=711237.176273326
672
+ }
673
+ if (`p2'==3) {
674
+ local C1=1.47962261931389
675
+ local C2=4964148.04675615
676
+ }
677
+ if (`p2'==4) {
678
+ local C1=-2.98356568117015
679
+ local C2=6513914.18146154
680
+ }
681
+ if (`p2'==5) {
682
+ local C1=2.8119664051992
683
+ local C2=1108538.27359325
684
+ }
685
+ }
686
+ if (`p1'==6){
687
+ if (`p2'==0) {
688
+ local C1=0.000185013186126071
689
+ local C2=51.1354623094499
690
+ }
691
+ if (`p2'==1) {
692
+ local C1=-0.0111415456649411
693
+ local C2=42649.1227734126
694
+ }
695
+ if (`p2'==2) {
696
+ local C1=0.161503519528196
697
+ local C2=3501503.85604356
698
+ }
699
+ if (`p2'==3) {
700
+ local C1=-0.960724128526635
701
+ local C2=53169992.4514926
702
+ }
703
+ if (`p2'==4) {
704
+ local C1=2.81992441765033
705
+ local C2=188508235.652391
706
+ }
707
+ if (`p2'==5) {
708
+ local C1=-4.32027687039226
709
+ local C2=151705648.524794
710
+ }
711
+ if (`p2'==6) {
712
+ local C1=3.30943989104708
713
+ local C2=17800087.7625795
714
+ }
715
+ }
716
+ if (`p1'==7){
717
+ if (`p2'==0) {
718
+ local C1=-4.90504635308753e-05
719
+ local C2=66.4686791385977
720
+ }
721
+ if (`p2'==1) {
722
+ local C1=0.00376765715918737
723
+ local C2=94641.1544249395
724
+ }
725
+ if (`p2'==2) {
726
+ local C1=-0.0702605819096789
727
+ local C2=13702534.6421256
728
+ }
729
+ if (`p2'==3) {
730
+ local C1=0.547905247658491
731
+ local C2=390598570.447533
732
+ }
733
+ if (`p2'==4) {
734
+ local C1=-2.17951306328177
735
+ local C2=2911582861.44089
736
+ }
737
+ if (`p2'==5) {
738
+ local C1=4.79666758701205
739
+ local C2=6153252223.55684
740
+ }
741
+ if (`p2'==6) {
742
+ local C1=-5.90634610503912
743
+ local C2=3332903726.62978
744
+ }
745
+ if (`p2'==7) {
746
+ local C1=3.80750473635271
747
+ local C2=285633131.714089
748
+ }
749
+ }
750
+ if (`p1'==8){
751
+ if (`p2'==0) {
752
+ local C1=1.28009644413396e-05
753
+ local C2=83.8024160962497
754
+ }
755
+ if (`p2'==1) {
756
+ local C1=-0.0012318922963459
757
+ local C2=191016.021735915
758
+ }
759
+ if (`p2'==2) {
760
+ local C1=0.0287254140712321
761
+ local C2=45250162.0718378
762
+ }
763
+ if (`p2'==3) {
764
+ local C1=-0.283516984432936
765
+ local C2=2197551933.29091
766
+ }
767
+ if (`p2'==4) {
768
+ local C1=1.4586429297924
769
+ local C2=29881663369.7835
770
+ }
771
+ if (`p2'==5) {
772
+ local C1=-4.30135545134544
773
+ local C2=129595889329.345
774
+ }
775
+ if (`p2'==6) {
776
+ local C1=7.53462833166122
777
+ local C2=180547839391.581
778
+ }
779
+ if (`p2'==7) {
780
+ local C1=-7.74197471141815
781
+ local C2=70153904195.6161
782
+ }
783
+ if (`p2'==8) {
784
+ local C1=4.30597522854805
785
+ local C2=4581150539.5851
786
+ }
787
+ }
788
+ if (`p1'==9){
789
+ if (`p2'==0) {
790
+ local C1=-2.56981365964748e-06
791
+ local C2=103.136967120782
792
+ }
793
+ if (`p2'==1) {
794
+ local C1=0.000382994418032467
795
+ local C2=357882.128373971
796
+ }
797
+ if (`p2'==2) {
798
+ local C1=-0.0111749973148108
799
+ local C2=131065694.25377
800
+ }
801
+ if (`p2'==3) {
802
+ local C1=0.136033833026886
803
+ local C2=10117813548.0377
804
+ }
805
+ if (`p2'==4) {
806
+ local C1=-0.875308513641357
807
+ local C2=228745243443.724
808
+ }
809
+ if (`p2'==5) {
810
+ local C1=3.30519819259644
811
+ local C2=1772311718505.28
812
+ }
813
+ if (`p2'==6) {
814
+ local C1=-7.69141054153442
815
+ local C2=4976168741761.24
816
+ }
817
+ if (`p2'==7) {
818
+ local C1=11.1589169502258
819
+ local C2=4895118462920.4
820
+ }
821
+ if (`p2'==8) {
822
+ local C1=-9.82742595672607
823
+ local C2=1429179266718.81
824
+ }
825
+ if (`p2'==9) {
826
+ local C1=4.80476921796799
827
+ local C2=73448661555.592
828
+ }
829
+ }
830
+ if (`p1'==10){
831
+ if (`p2'==0) {
832
+ local C1=0.000106673240225064
833
+ local C2=124.520672499749
834
+ }
835
+ if (`p2'==1) {
836
+ local C1=-0.00113465404137969
837
+ local C2=632044.740476285
838
+ }
839
+ if (`p2'==2) {
840
+ local C1=0.00709155201911926
841
+ local C2=342376453.533928
842
+ }
843
+ if (`p2'==3) {
844
+ local C1=-0.0642940998077393
845
+ local C2=39880130461.1472
846
+ }
847
+ if (`p2'==4) {
848
+ local C1=0.481655120849609
849
+ local C2=1403708230458.23
850
+ }
851
+ if (`p2'==5) {
852
+ local C1=-2.25773620605469
853
+ local C2=17759178822082.7
854
+ }
855
+ if (`p2'==6) {
856
+ local C1=6.68578338623047
857
+ local C2=87680850871448.7
858
+ }
859
+ if (`p2'==7) {
860
+ local C1=-12.7984313964844
861
+ local C2=171433376795336
862
+ }
863
+ if (`p2'==8) {
864
+ local C1=15.8130035400391
865
+ local C2=125143858240646
866
+ }
867
+ if (`p2'==9) {
868
+ local C1=-12.1715126037598
869
+ local C2=28437327354854.3
870
+ }
871
+ if (`p2'==10) {
872
+ local C1=5.30557250976562
873
+ local C2=1179682266812.88
874
+ }
875
+ }
876
+ }
877
+
878
+
879
+ ereturn scalar C1=`C1'
880
+ ereturn scalar C2=`C2'
881
+
882
+
883
+
884
+ end
885
+
30/replication_package/Adofiles/rd_2021/rdbwselect_2014_kweight.mo ADDED
Binary file (6.64 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdbwselect_2014_rdvce.mo ADDED
Binary file (9.43 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdbwselect_2014_regconst.mo ADDED
Binary file (6.73 kB). View file
 
30/replication_package/Adofiles/rd_2021/rddensity.ado ADDED
@@ -0,0 +1,1406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ********************************************************************************
2
+ * RDDENSITY STATA PACKAGE -- rddensity
3
+ * Authors: Matias D. Cattaneo, Michael Jansson, Xinwei Ma
4
+ ********************************************************************************
5
+ *!version 2.3 2021-02-28
6
+
7
+ capture program drop rddensityEST
8
+
9
+ program define rddensityEST, eclass
10
+ syntax varlist(max=1) [if] [in] [, ///
11
+ c(real 0) ///
12
+ p(integer 2) ///
13
+ q(integer 0) ///
14
+ fitselect(string) ///
15
+ kernel(string) ///
16
+ h(string) ///
17
+ bwselect(string) ///
18
+ vce(string) ///
19
+ all ///
20
+ noMASSpoints ///
21
+ noREGularize ///
22
+ NLOCalmin (integer -1) ///
23
+ NUNIquemin (integer -1) ///
24
+ ]
25
+
26
+ marksample touse
27
+
28
+ if (`q'==0) local q = `p' + 1
29
+ if ("`fitselect'"=="") local fitselect = "unrestricted"
30
+ local fitselect = lower("`fitselect'")
31
+ if ("`kernel'"=="") local kernel = "triangular"
32
+ local kernel = lower("`kernel'")
33
+ if ("`bwselect'"=="") local bwselect = "comb"
34
+ local bwselect = lower("`bwselect'")
35
+ if ("`vce'"=="") local vce = "jackknife"
36
+ local vce = lower("`vce'")
37
+
38
+ tokenize `h'
39
+ local w : word count `h'
40
+ if `w' == 0 {
41
+ local hl 0
42
+ local hr 0
43
+ }
44
+ if `w' == 1 {
45
+ local hl `"`1'"'
46
+ local hr `"`1'"'
47
+ }
48
+ if `w' == 2 {
49
+ local hl `"`1'"'
50
+ local hr `"`2'"'
51
+ }
52
+ if `w' >= 3 {
53
+ di as error "{err}{cmd:h()} only accepts two inputs."
54
+ exit 125
55
+ }
56
+
57
+ preserve
58
+ qui keep if `touse'
59
+
60
+ local x "`varlist'"
61
+
62
+ qui drop if `x'==.
63
+
64
+ qui su `x'
65
+ local x_min = r(min)
66
+ local x_max = r(max)
67
+ local N = r(N)
68
+
69
+ qui count if `x'<`c'
70
+ local Nl = r(N)
71
+ qui count if `x'>=`c'
72
+ local Nr = r(N)
73
+
74
+ ****************************************************************************
75
+ *** BEGIN ERROR HANDLING ***************************************************
76
+ if (`c'<=`x_min' | `c'>=`x_max'){
77
+ di "{err}{cmd:c()} should be set within the range of `x'."
78
+ exit 125
79
+ }
80
+
81
+ if (`Nl'<10 | `Nr'<10){
82
+ di "{err}Not enough observations to perform calculations."
83
+ exit 2001
84
+ }
85
+
86
+ if (`p'!=1 & `p'!=2 & `p'!=3 & `p'!=4 & `p'!=5 & `p'!=6 & `p'!=7){
87
+ di "{err}{cmd:p()} should be an integer value less or equal than 7."
88
+ exit 125
89
+ }
90
+
91
+ if (`p'>`q'){
92
+ di "{err}{cmd:p()} should be an integer value no larger than {cmd:q()}."
93
+ exit 125
94
+ }
95
+
96
+ if ("`kernel'"!="uniform" & "`kernel'"!="triangular" & "`kernel'"!="epanechnikov"){
97
+ di "{err}{cmd:kernel()} incorrectly specified."
98
+ exit 7
99
+ }
100
+
101
+ if ("`fitselect'"!="restricted" & "`fitselect'"!="unrestricted"){
102
+ di "{err}{cmd:fitselect()} incorrectly specified."
103
+ exit 7
104
+ }
105
+
106
+ if (`hl'<0){
107
+ di "{err}{cmd:hl()} must be a positive real number."
108
+ exit 411
109
+ }
110
+
111
+ if (`hr'<0){
112
+ di "{err}{cmd:hr()} must be a positive real number."
113
+ exit 411
114
+ }
115
+
116
+ if ("`fitselect'"=="restricted" & `hl'!=`hr'){
117
+ di "{err}{{cmd:hl()} and {cmd:hr()} must be equal in the restricted model."
118
+ exit 7
119
+ }
120
+
121
+ if ("`bwselect'"!="each" & "`bwselect'"!="diff" & "`bwselect'"!="sum" & "`bwselect'"!="comb"){
122
+ di "{err}{cmd:bwselect()} incorrectly specified."
123
+ exit 7
124
+ }
125
+
126
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="each"){
127
+ di "{err}{cmd:bwselect(each)} is not available in the restricted model."
128
+ exit 7
129
+ }
130
+
131
+ if ("`vce'"!="jackknife" & "`vce'"!="plugin"){
132
+ di "{err}{cmd:vce()} incorrectly specified."
133
+ exit 7
134
+ }
135
+
136
+ if ("`regularize'" == "") {
137
+ local regularize = 1
138
+ local Tempregularize = "regularize"
139
+ }
140
+ else {
141
+ local regularize = 0
142
+ local Tempregularize = "noregularize"
143
+ }
144
+
145
+ if ("`masspoints'" == "") {
146
+ local masspoints = 1
147
+ local Tempmasspoints = "masspoints"
148
+ }
149
+ else {
150
+ local masspoints = 0
151
+ local Tempmasspoints = "nomasspoints"
152
+ }
153
+
154
+ if (`nlocalmin' < 0) {
155
+ local nlocalmin = 20 + `p' + 1
156
+ }
157
+
158
+ if (`nuniquemin' < 0) {
159
+ local nuniquemin = 20 + `p' + 1
160
+ }
161
+ *** END ERROR HANDLING *****************************************************
162
+ ****************************************************************************
163
+
164
+ ****************************************************************************
165
+ *** BEGIN BANDWIDTH SELECTION **********************************************
166
+ if ("`h'"!="") {
167
+ local bwmethod = "manual"
168
+ }
169
+
170
+ if (`hl'==0 | `hr'==0) {
171
+ local bwmethod = "`bwselect'"
172
+ disp in ye "Computing data-driven bandwidth selectors."
173
+ qui rdbwdensity `x', c(`c') p(`p') kernel(`kernel') fitselect(`fitselect') vce(`vce') ///
174
+ nlocalmin(`nlocalmin') nuniquemin(`nuniquemin') `Tempregularize' `Tempmasspoints'
175
+ mat out = e(h)
176
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="each" & `hl'==0) local hl = out[1,1]
177
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="each" & `hr'==0) local hr = out[2,1]
178
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="diff" & `hl'==0) local hl = out[3,1]
179
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="diff" & `hr'==0) local hr = out[3,1]
180
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="sum" & `hl'==0) local hl = out[4,1]
181
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="sum" & `hr'==0) local hr = out[4,1]
182
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="comb" & `hl'==0) local hl = out[1,1]+out[3,1]+out[4,1] - min(out[1,1],out[3,1],out[4,1]) - max(out[1,1],out[3,1],out[4,1])
183
+ if ("`fitselect'"=="unrestricted" & "`bwselect'"=="comb" & `hr'==0) local hr = out[2,1]+out[3,1]+out[4,1] - min(out[2,1],out[3,1],out[4,1]) - max(out[2,1],out[3,1],out[4,1])
184
+
185
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="diff" & `hl'==0) local hl = out[3,1]
186
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="diff" & `hr'==0) local hr = out[3,1]
187
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="sum" & `hl'==0) local hl = out[4,1]
188
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="sum" & `hr'==0) local hr = out[4,1]
189
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="comb" & `hl'==0) local hl = min(out[3,1],out[4,1])
190
+ if ("`fitselect'"=="restricted" & "`bwselect'"=="comb" & `hr'==0) local hr = min(out[3,1],out[4,1])
191
+ }
192
+ *** END BANDWIDTH SELECTION ************************************************
193
+ ****************************************************************************
194
+ qui replace `x' = `x'-`c'
195
+
196
+ qui count if `x'<0 & `x'>= -`hl'
197
+ if (`r(N)'<5){
198
+ display("{err}Not enough observations on the left to perform calculations.")
199
+ exit(1)
200
+ }
201
+ local Nlh = r(N)
202
+
203
+ qui count if `x'>=0 & `x'<=`hr'
204
+ if (`r(N)'<5){
205
+ display("{err}Not enough observations on the right to perform calculations.")
206
+ exit(1)
207
+ }
208
+ local Nrh = r(N)
209
+ local Nh = `Nlh' + `Nrh'
210
+
211
+ qui sort `x'
212
+
213
+ ****************************************************************************
214
+ *** BEGIN MATA ESTIMATION **************************************************
215
+ mata{
216
+ X = st_data(.,("`x'"), 0)
217
+
218
+ XUnique = rddensity_unique(X)
219
+ freqUnique = XUnique[., 2]
220
+ indexUnique = XUnique[., 4]
221
+ XUnique = XUnique[., 1]
222
+ NUnique = length(XUnique)
223
+ NlUnique = sum(XUnique :< 0)
224
+ NrUnique = sum(XUnique :>= 0)
225
+
226
+ Y = (0..(`N'-1))' :/ (`N'-1)
227
+ if (`masspoints') {
228
+ Y = rddensity_rep(Y[indexUnique], freqUnique)
229
+ }
230
+ masspoints_flag = sum(freqUnique :!= 1) > 0 & `masspoints'
231
+
232
+ Y = select(Y, X :>= -`hl' :& X :<= `hr')
233
+ X = select(X, X :>= -`hl' :& X :<= `hr')
234
+ fV_q = rddensity_fv(Y, X, `Nl', `Nr', `Nlh', `Nrh', `hl', `hr', `q', 1, "`kernel'", "`fitselect'", "`vce'", `masspoints')
235
+ T_q = fV_q[3,1] / sqrt(fV_q[3,2])
236
+ st_numscalar("f_ql", fV_q[1,1]); st_numscalar("f_qr", fV_q[2,1])
237
+ st_numscalar("se_ql", sqrt(fV_q[1,2])); st_numscalar("se_qr", sqrt(fV_q[2,2]))
238
+ st_numscalar("se_q", sqrt(fV_q[3,2]))
239
+ st_numscalar("T_q", T_q); st_numscalar("pval_q", 2*normal(-abs(T_q)))
240
+
241
+ if ("`all'"!=""){
242
+ fV_p = rddensity_fv(Y, X, `Nl', `Nr', `Nlh', `Nrh', `hl', `hr', `p', 1, "`kernel'", "`fitselect'", "`vce'", `masspoints')
243
+ T_p = fV_p[3,1] / sqrt(fV_p[3,2])
244
+ st_numscalar("f_pl", fV_p[1,1]); st_numscalar("f_pr", fV_p[2,1])
245
+ st_numscalar("se_pl", sqrt(fV_p[1,2])); st_numscalar("se_pr", sqrt(fV_p[2,2]))
246
+ st_numscalar("se_p", sqrt(fV_p[3,2]))
247
+ st_numscalar("T_p", T_p); st_numscalar("pval_p", 2*normal(-abs(T_p)))
248
+ }
249
+ st_numscalar("masspoints_flag", masspoints_flag)
250
+ *display("Estimation completed.")
251
+ }
252
+ *** END MATA ESTIMATION ****************************************************
253
+ ****************************************************************************
254
+
255
+ ****************************************************************************
256
+ *** BEGIN OUTPUT TABLE *****************************************************
257
+
258
+ if (`hl' > `c'-`x_min') {
259
+ disp ""
260
+ disp "Bandwidth {it:hl} greater than the range of the data."
261
+ }
262
+ if (`hr' > `x_max'-`c') {
263
+ disp ""
264
+ disp "Bandwidth {it:hr} greater than the range of the data."
265
+ }
266
+ if (`Nlh'<20 | `Nrh'<20) disp in red "Bandwidth {it:h} may be too small."
267
+ if (masspoints_flag == 1) {
268
+ disp ""
269
+ disp "Point estimates and standard errors have been adjusted for repeated observations."
270
+ disp "(Use option {it:nomasspoints} to suppress this adjustment.)"
271
+ }
272
+
273
+ disp ""
274
+ disp "RD Manipulation test using local polynomial density estimation."
275
+
276
+ disp ""
277
+ disp in smcl in gr "{ralign 9: c = }" in ye %9.3f `c' _col(19) " {c |}" _col(22) in gr "Left of c" _col(33) in gr "Right of c" _col(53) in gr "Number of obs = " in ye %12.0f `N'
278
+ disp in smcl in gr "{hline 19}{c +}{hline 22}" _col(53) in gr "Model = " in ye "{ralign 12:`fitselect'}"
279
+ disp in smcl in gr "{ralign 18:Number of obs}" _col(19) " {c |} " _col(21) as result %9.0f `Nl' _col(34) %9.0f `Nr' _col(53) in gr "BW method = " in ye "{ralign 12:`bwmethod'}"
280
+ disp in smcl in gr "{ralign 18:Eff. Number of obs}" _col(19) " {c |} " _col(21) as result %9.0f `Nlh' _col(34) %9.0f `Nrh' _col(53) in gr "Kernel = " in ye "{ralign 12:`kernel'}"
281
+ disp in smcl in gr "{ralign 18:Order est. (p)}" _col(19) " {c |} " _col(21) as result %9.0f `p' _col(34) %9.0f `p' _col(53) in gr "VCE method = " in ye "{ralign 12:`vce'}"
282
+ disp in smcl in gr "{ralign 18:Order bias (q)}" _col(19) " {c |} " _col(21) as result %9.0f `q' _col(34) %9.0f `q'
283
+ disp in smcl in gr "{ralign 18:BW est. (h)}" _col(19) " {c |} " _col(21) as result %9.3f `hl' _col(34) %9.3f `hr'
284
+
285
+ disp ""
286
+ disp "Running variable: `x'."
287
+ disp in smcl in gr "{hline 19}{c TT}{hline 22}"
288
+ disp in smcl in gr "{ralign 18:Method}" _col(19) " {c |} " _col(23) " T" _col(38) "P>|T|"
289
+ disp in smcl in gr "{hline 19}{c +}{hline 22}"
290
+ if ("`all'"!="" & `q'>`p'){
291
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(21) in ye %9.4f T_p _col(34) %9.4f pval_p
292
+ }
293
+ if (`q'>`p') {
294
+ disp in smcl in gr "{ralign 18:Robust}" _col(19) " {c |} " _col(21) in ye %9.4f T_q _col(34) %9.4f pval_q
295
+ }
296
+ else {
297
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(21) in ye %9.4f T_q _col(34) %9.4f pval_q
298
+ }
299
+
300
+
301
+ disp in smcl in gr "{hline 19}{c BT}{hline 22}"
302
+ disp ""
303
+
304
+ *** END OUTPUT TABLE *******************************************************
305
+ ****************************************************************************
306
+
307
+ restore
308
+
309
+ ereturn clear
310
+ ereturn scalar c = `c'
311
+ ereturn scalar p = `p'
312
+ ereturn scalar q = `q'
313
+ ereturn scalar N_l = `Nl'
314
+ ereturn scalar N_r = `Nr'
315
+ ereturn scalar N_h_l = `Nlh'
316
+ ereturn scalar N_h_r = `Nrh'
317
+ ereturn scalar h_l = `hl'
318
+ ereturn scalar h_r = `hr'
319
+ ereturn scalar f_ql = f_ql
320
+ ereturn scalar f_qr = f_qr
321
+ ereturn scalar se_ql = se_ql
322
+ ereturn scalar se_qr = se_qr
323
+ ereturn scalar se_q = se_q
324
+ ereturn scalar pv_q = pval_q
325
+ ereturn scalar T_q = T_q
326
+
327
+ if ("`all'"!=""){
328
+ ereturn scalar f_pl = f_pl
329
+ ereturn scalar f_pr = f_pr
330
+ ereturn scalar se_pl = se_pl
331
+ ereturn scalar se_pr = se_pr
332
+ ereturn scalar se_p = se_p
333
+ ereturn scalar pv_p = pval_p
334
+ ereturn scalar T_p = T_p
335
+ }
336
+
337
+ ereturn local runningvar "`x'"
338
+ ereturn local kernel = "`kernel'"
339
+ ereturn local bwmethod = "`bwmethod'"
340
+ ereturn local vce = "`vce'"
341
+
342
+ mata: mata clear
343
+
344
+ end
345
+
346
+ ********************************************************************************
347
+ * MAIN PROGRAM
348
+ ********************************************************************************
349
+
350
+ capture program drop rddensity
351
+
352
+ program define rddensity, eclass
353
+ syntax varlist(max=1) ///
354
+ [if] [in] [, ///
355
+ /* Estimation */ ///
356
+ C(real 0) ///
357
+ P(integer 2) ///
358
+ Q(integer 0) ///
359
+ FITselect(string) ///
360
+ KERnel(string) ///
361
+ VCE(string) ///
362
+ noMASSpoints ///
363
+ /* Bandwidth selection */ ///
364
+ H(string) ///
365
+ BWselect(string) ///
366
+ noREGularize ///
367
+ NLOCalmin (integer -1) ///
368
+ NUNIquemin (integer -1) ///
369
+ /* Binomial test */ ///
370
+ noBINOmial ///
371
+ bino_n(integer 0) ///
372
+ bino_nstep(integer 0) ///
373
+ bino_w(string) ///
374
+ bino_wstep(string) ///
375
+ bino_nw(integer 10) ///
376
+ bino_p(real 0.5) ///
377
+ /* Plot */ ///
378
+ PLot ///
379
+ plot_range(string) ///
380
+ plot_n(string) ///
381
+ plot_grid(string) ///
382
+ plot_bwselect(string) ///
383
+ plot_ciuniform ///
384
+ plot_cisimul(integer 2000) ///
385
+ plotl_estype(string) ///
386
+ esll_opt(string) ///
387
+ espl_opt(string) ///
388
+ plotr_estype(string) ///
389
+ eslr_opt(string) ///
390
+ espr_opt(string) ///
391
+ plotl_citype(string) ///
392
+ cirl_opt(string) ///
393
+ cill_opt(string) ///
394
+ cibl_opt(string) ///
395
+ plotr_citype(string) ///
396
+ cirr_opt(string) ///
397
+ cilr_opt(string) ///
398
+ cibr_opt(string) ///
399
+ /* Histogram */ ///
400
+ noHISTogram ///
401
+ hist_range(string) ///
402
+ hist_n(string) ///
403
+ hist_width(string) ///
404
+ histl_opt(string) ///
405
+ histr_opt(string) ///
406
+ /* Additional grph options */ ///
407
+ graph_opt(string) ///
408
+ GENVars(string) ///
409
+ /* Reporting */ ///
410
+ LEVel(real 95) ///
411
+ ALL ///
412
+ ]
413
+
414
+ marksample touse
415
+
416
+ local x "`varlist'"
417
+
418
+ ****************************************************************************
419
+ *** CALL: RDDENSITYEST ********************************************************
420
+
421
+ if ("`regularize'" == "") {
422
+ local regularize = "regularize"
423
+ }
424
+ else {
425
+ local regularize = "noregularize"
426
+ }
427
+
428
+ if ("`masspoints'" == "") {
429
+ local masspoints = "masspoints"
430
+ }
431
+ else {
432
+ local masspoints = "nomasspoints"
433
+ }
434
+
435
+ if ("`all'" != "") {
436
+ local all = "all"
437
+ }
438
+ else {
439
+ local all = ""
440
+ }
441
+
442
+ rddensityEST `x' if `touse', ///
443
+ c(`c') p(`p') q(`q') fitselect(`fitselect') kernel(`kernel') h(`h') bwselect(`bwselect') vce(`vce') ///
444
+ `regularize' `masspoints' `all' nlocalmin(`nlocalmin') nuniquemin(`nuniquemin')
445
+
446
+ /// save ereturn results
447
+ local c = e(c)
448
+ local p = e(p)
449
+ local q = e(q)
450
+ local N_l = e(N_l)
451
+ local N_r = e(N_r)
452
+ local N_h_l = e(N_h_l)
453
+ local N_h_r = e(N_h_r)
454
+ local h_l = e(h_l)
455
+ local h_r = e(h_r)
456
+ local f_ql = e(f_ql)
457
+ local f_qr = e(f_qr)
458
+ local se_ql = e(se_ql)
459
+ local se_qr = e(se_qr)
460
+ local se_q = e(se_q)
461
+ local pv_q = e(pv_q)
462
+ local T_q = e(T_q)
463
+
464
+ if ("`all'" != ""){
465
+ local f_pl = e(f_pl)
466
+ local f_pr = e(f_pr)
467
+ local se_pl = e(se_pl)
468
+ local se_pr = e(se_pr)
469
+ local se_p = e(se_p)
470
+ local pv_p = e(pv_p)
471
+ local T_p = e(T_p)
472
+ }
473
+
474
+ local vce = e(vce)
475
+ local bwmethod = e(bwmethod)
476
+ local kernel = e(kernel)
477
+ local runningvar = e(runningvar)
478
+
479
+ ****************************************************************************
480
+ *** BINOMIAL TEST **********************************************************
481
+
482
+ // determine initial window width
483
+ if ("`bino_w'" != "") {
484
+ local flag_ini_window = "w_provided"
485
+ }
486
+ else if (`bino_n' != 0) {
487
+ local flag_ini_window = "n_provided"
488
+ }
489
+ else {
490
+ local flag_ini_window = "automatic"
491
+ }
492
+
493
+ // determine window increment
494
+ if ("`bino_wstep'" != "") {
495
+ local flag_step_window = "w_provided"
496
+ }
497
+ else if (`bino_nstep' != 0) {
498
+ local flag_step_window = "n_provided"
499
+ }
500
+ else {
501
+ local flag_step_window = "automatic"
502
+ }
503
+
504
+ // bino_w check
505
+ tokenize `bino_w'
506
+ local w : word count `bino_w'
507
+ if (`w' == 0) {
508
+ local bino_w_l = 0
509
+ local bino_w_r = 0
510
+ }
511
+ else if (`w' == 1) {
512
+ local bino_w_l `"`1'"'
513
+ local bino_w_r `"`1'"'
514
+ if (`bino_w_l' <= 0) {
515
+ di as err `"{err}{cmd:bino_w()}: incorrectly specified (should be a positive number)"'
516
+ exit 198
517
+ }
518
+ }
519
+ else if (`w' == 2) {
520
+ local bino_w_l `"`1'"'
521
+ local bino_w_r `"`2'"'
522
+ if (`bino_w_l' <= 0 | `bino_w_r' <= 0) {
523
+ di as err `"{err}{cmd:bino_w()}: incorrectly specified (should be positive numbers)"'
524
+ exit 198
525
+ }
526
+ }
527
+ else {
528
+ di as error "{err}{cmd:bino_w()} takes at most two inputs."
529
+ exit 125
530
+ }
531
+
532
+ // bino_n check
533
+ if (`bino_n' > 0) {
534
+ // do nothing
535
+ }
536
+ else if (`bino_n' < 0) {
537
+ di as err `"{err}{cmd:bino_n()}: incorrectly specified (should be a positive integer)"'
538
+ exit 198
539
+ }
540
+ else {
541
+ local bino_n = 20
542
+ }
543
+
544
+ // bino_wstep check
545
+ tokenize `bino_wstep'
546
+ local w : word count `bino_wstep'
547
+ if (`w' == 0) {
548
+ local bino_wstep_l = 0
549
+ local bino_wstep_r = 0
550
+ }
551
+ else if (`w' == 1) {
552
+ local bino_wstep_l `"`1'"'
553
+ local bino_wstep_r `"`1'"'
554
+ if (`bino_wstep_l' <= 0) {
555
+ di as err `"{err}{cmd:bino_wstep()}: incorrectly specified (should be a positive number)"'
556
+ exit 198
557
+ }
558
+ }
559
+ else if (`w' == 2) {
560
+ local bino_wstep_l `"`1'"'
561
+ local bino_wstep_r `"`2'"'
562
+ if (`bino_wstep_l' <= 0 | `bino_wstep_r' <= 0) {
563
+ di as err `"{err}{cmd:bino_wstep()}: incorrectly specified (should be positive numbers)"'
564
+ exit 198
565
+ }
566
+ }
567
+ else {
568
+ di as error "{err}{cmd:bino_wstep()} takes at most two inputs."
569
+ exit 125
570
+ }
571
+
572
+ // bino_nstep check
573
+ if (`bino_nstep' > 0) {
574
+ // do nothing
575
+ }
576
+ else if (`bino_nstep' < 0) {
577
+ di as err `"{err}{cmd:bino_nstep()}: incorrectly specified (should be a positive integer)"'
578
+ exit 198
579
+ }
580
+ else {
581
+ // do nothing
582
+ }
583
+
584
+ // bino_nw check
585
+ if (`bino_nw' <= 0) {
586
+ di as err `"{err}{cmd:bino_nw()}: incorrectly specified (should be a positive integer)"'
587
+ exit 198
588
+ }
589
+
590
+ // bino_p check
591
+ if (`bino_p'<=0 | `bino_p'>=1) {
592
+ di as err `"{err}{cmd:bino_p()}: incorrectly specified (should be between 0 and 1)"'
593
+ exit 198
594
+ }
595
+
596
+ // calculate windows
597
+ mata {
598
+ if ("`binomial'" == "") {
599
+
600
+ X = st_data(.,("`x'"), 0)
601
+ XL = sort(abs(select(X, X :< `c') :- `c'), 1)
602
+ XR = sort(select(X, X :>= `c') :- `c', 1)
603
+ Y = sort(abs(X :- `c'), 1)
604
+ binomTempLWindow = J(`bino_nw', 1, .)
605
+ binomTempRWindow = J(`bino_nw', 1, .)
606
+
607
+ // initial window width
608
+ if ("`flag_ini_window'" == "w_provided") {
609
+ binomTempLWindow[1] = `bino_w_l'
610
+ binomTempRWindow[1] = `bino_w_r'
611
+ }
612
+ else {
613
+ binomTempLWindow[1] = Y[min((`bino_n', `N_l'+`N_r'))]
614
+ binomTempRWindow[1] = binomTempLWindow[1]
615
+ }
616
+
617
+ // window increment
618
+ if (`bino_nw' > 1) {
619
+ if ("`flag_step_window'" == "w_provided") {
620
+ binomTempLWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* `bino_wstep_l' :+ binomTempLWindow[1]
621
+ binomTempRWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* `bino_wstep_r' :+ binomTempRWindow[1]
622
+ }
623
+ else if ("`flag_step_window'" == "n_provided") {
624
+ for (jj=2; jj<=`bino_nw'; jj++) {
625
+ if ("`flag_ini_window'" == "w_provided") {
626
+ binomTempLWindow[jj] = Y[min((sum(XL :<= binomTempLWindow[1]) + sum(XR :<= binomTempRWindow[1]) + (jj-1) * `bino_nstep', `N_l'+`N_r'))]
627
+ binomTempRWindow[jj] = binomTempLWindow[jj]
628
+ }
629
+ else {
630
+ binomTempLWindow[jj] = Y[min((`bino_n' + (jj-1) * `bino_nstep', `N_l'+`N_r'))]
631
+ binomTempRWindow[jj] = binomTempLWindow[jj]
632
+ }
633
+ }
634
+ }
635
+ else {
636
+ if (binomTempLWindow[1] >= `h_l' | binomTempRWindow[1] >= `h_r') {
637
+ // exceed bandwidth on either side
638
+ binomTempLWindow = binomTempLWindow[1]
639
+ binomTempRWindow = binomTempRWindow[1]
640
+ }
641
+ else {
642
+ if (binomTempLWindow[1]*`bino_nw' > `h_l') {
643
+ binomTempLWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* ((`h_l'-binomTempLWindow[1])/(`bino_nw'-1)) :+ binomTempLWindow[1]
644
+ }
645
+ else {
646
+ binomTempLWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* binomTempLWindow[1] :+ binomTempLWindow[1]
647
+ }
648
+
649
+ if (binomTempRWindow[1]*`bino_nw' > `h_r') {
650
+ binomTempRWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* ((`h_r'-binomTempRWindow[1])/(`bino_nw'-1)) :+ binomTempRWindow[1]
651
+ }
652
+ else {
653
+ binomTempRWindow[2..`bino_nw', 1] = (1..(`bino_nw'-1))' :* binomTempRWindow[1] :+ binomTempRWindow[1]
654
+ }
655
+ }
656
+ }
657
+ }
658
+
659
+ // window sample size
660
+ binomTempLN = J(rows(binomTempLWindow), 1, .)
661
+ binomTempRN = J(rows(binomTempLWindow), 1, .)
662
+
663
+ for (jj=1; jj<=rows(binomTempLWindow); jj++) {
664
+ binomTempLN[jj] = sum(XL :<= binomTempLWindow[jj])
665
+ binomTempRN[jj] = sum(XR :<= binomTempRWindow[jj])
666
+ }
667
+
668
+ // binomTempLWindow
669
+ // binomTempRWindow
670
+ // binomTempLN
671
+ // binomTempRN
672
+ // rows(binomTempLWindow)
673
+
674
+ st_matrix("binomTempLeftWindow" , binomTempLWindow)
675
+ st_matrix("binomTempRightWindow", binomTempRWindow)
676
+ st_matrix("binomTempLeftN" , binomTempLN)
677
+ st_matrix("binomTempRightN", binomTempRN)
678
+ st_matrix("binomTempNumber", rows(binomTempLWindow))
679
+ st_matrix("binomTempEqualWindow", sum(binomTempLWindow != binomTempRWindow) == 0)
680
+
681
+ }
682
+ }
683
+
684
+ local binomTempNumber = binomTempNumber[1,1]
685
+ local binomTempEqualWindow = binomTempEqualWindow[1,1]
686
+
687
+ if ("`binomial'" == "") {
688
+ disp in ye "P-values of binomial tests." in gr " (H0: prob = `bino_p')"
689
+ disp in smcl in gr "{hline 19}{c TT}{hline 22}{c TT}{hline 10}"
690
+
691
+
692
+
693
+ if (`binomTempEqualWindow' == 1) {
694
+ disp in smcl in gr "{ralign 18: Window Length / 2}" _col(20) "{c |}" "{ralign 9: <c}" _col(33) "{ralign 9: >=c}" _col(43) "{c |}" _col(49) "P>|T|"
695
+ }
696
+ else {
697
+ disp in smcl in gr "{ralign 18: Window Length}" _col(20) "{c |}" "{ralign 9: <c}" _col(33) "{ralign 9: >=c}" _col(43) "{c |}" _col(49) "P>|T|"
698
+ }
699
+
700
+ disp in smcl in gr "{hline 19}{c +}{hline 22}{c +}{hline 10}"
701
+
702
+ forvalues i = 1(1)`binomTempNumber' {
703
+ local binomTempTotal = binomTempLeftN[`i', 1] + binomTempRightN[`i', 1]
704
+ local binomTempSuccess = binomTempLeftN[`i', 1]
705
+ if (`binomTempTotal' > 0) {
706
+ qui bitesti `binomTempTotal' `binomTempSuccess' `bino_p'
707
+ if (`binomTempEqualWindow' == 1) {
708
+ disp in smcl in ye _col(10) %9.3f binomTempLeftWindow[`i',1] _col(20) "{c |}" %9.0f binomTempLeftN[`i',1] _col(33) %9.0f binomTempRightN[`i',1] _col(43) "{c |}" _col(45) %9.4f r(p)
709
+ }
710
+ else {
711
+ disp in smcl in ye %8.3f binomTempLeftWindow[`i',1] _col(10) "+" %8.3f binomTempRightWindow[`i',1] _col(20) "{c |}" %9.0f binomTempLeftN[`i',1] _col(33) %9.0f binomTempRightN[`i',1] _col(43) "{c |}" _col(45) %9.4f r(p)
712
+ }
713
+ }
714
+ else {
715
+ if (`binomTempEqualWindow' == 1) {
716
+ disp in smcl in ye _col(10) %9.3f binomTempLeftWindow[`i',1] _col(20) "{c |}" %9.0f 0 _col(33) %9.0f 0 _col(43) "{c |}" _col(45) %9.4f 1.0000
717
+ }
718
+ else {
719
+ disp in smcl in ye %8.3f binomTempLeftWindow[`i',1] _col(10) "+" %8.3f binomTempRightWindow[`i',1] _col(20) "{c |}" %9.0f 0 _col(33) %9.0f 0 _col(43) "{c |}" _col(45) %9.4f 1.0000
720
+ }
721
+ }
722
+ }
723
+
724
+ disp in smcl in gr "{hline 19}{c BT}{hline 22}{c BT}{hline 10}"
725
+
726
+ }
727
+
728
+ ****************************************************************************
729
+ *** LPDENSITY **************************************************************
730
+
731
+ // plot_range
732
+ tokenize `plot_range'
733
+ local w : word count `plot_range'
734
+ if `w' == 0 {
735
+ qui sum `x'
736
+ if (`c' - 3 * `h_l' < r(min)) {
737
+ local plot_range_l = r(min)
738
+ }
739
+ else {
740
+ local plot_range_l = `c' - 3 * `h_l'
741
+ }
742
+ if (`c' + 3 * `h_r' > r(max)) {
743
+ local plot_range_r = r(max)
744
+ }
745
+ else {
746
+ local plot_range_r = `c' + 3 * `h_r'
747
+ }
748
+ }
749
+ if `w' == 1 {
750
+ di as error "{err}{cmd:plot_range()} takes two inputs."
751
+ exit 125
752
+ }
753
+ if `w' == 2 {
754
+ local plot_range_l `"`1'"'
755
+ local plot_range_r `"`2'"'
756
+ }
757
+ if `w' >= 3 {
758
+ di as error "{err}{cmd:plot_range()} takes two inputs."
759
+ exit 125
760
+ }
761
+
762
+ // plot_n
763
+ tokenize `plot_n'
764
+ local w : word count `plot_n'
765
+ if `w' == 0 {
766
+ local plot_n_l = 10
767
+ local plot_n_r = 10
768
+ }
769
+ if `w' == 1 {
770
+ local plot_n_l `"`1'"'
771
+ local plot_n_r `"`1'"'
772
+ if (`plot_n_l' <= 0) {
773
+ di as err `"{err}{cmd:plot_n()}: incorrectly specified (should be a positive integer)"'
774
+ exit 198
775
+ }
776
+ }
777
+ if `w' == 2 {
778
+ local plot_n_l `"`1'"'
779
+ local plot_n_r `"`2'"'
780
+ if (`plot_n_l' <= 0 | `plot_n_r' <= 0) {
781
+ di as err `"{err}{cmd:plot_n()}: incorrectly specified (should be positive integers)"'
782
+ exit 198
783
+ }
784
+ }
785
+ if `w' >= 3 {
786
+ di as error "{err}{cmd:plot_n()} takes two inputs."
787
+ exit 125
788
+ }
789
+
790
+ // plot_grid
791
+ if ("`plot_grid'" == "") {
792
+ local plot_grid "es"
793
+ }
794
+ else {
795
+ if ("`plot_grid'" != "es" & "`plot_grid'" != "qs") {
796
+ di as error "{err}{cmd:plot_grid()} incorrectly specified."
797
+ exit 125
798
+ }
799
+ }
800
+
801
+ // level
802
+ if (`level' <= 0 | `level' >= 100) {
803
+ di as err `"{err}{cmd:level()}: incorrectly specified"'
804
+ exit 198
805
+ }
806
+
807
+ // plot
808
+ if ("`plot'" != "") {
809
+ local plot = 1
810
+ capture which lpdensity
811
+ if (_rc == 111) {
812
+ di as error `"{err}plotting feature requires command {cmd:lpdensity}, install with"'
813
+ di as error `"{err}net install lpdensity, from(https://raw.githubusercontent.com/nppackages/lpdensity/master/stata) replace"'
814
+ exit 111
815
+ }
816
+ }
817
+ else {
818
+ local plot = 0
819
+ }
820
+
821
+ if (`plot' == 1) {
822
+
823
+ if (`plot_n_l' + `plot_n_r' > _N) {
824
+ local newN = `plot_n_l' + `plot_n_r'
825
+ set obs `newN'
826
+ }
827
+ tempvar temp_grid
828
+ qui gen `temp_grid' = .
829
+ tempvar temp_bw
830
+ qui gen `temp_bw' = .
831
+ tempvar temp_f
832
+ qui gen `temp_f' = .
833
+ tempvar temp_cil
834
+ qui gen `temp_cil' = .
835
+ tempvar temp_cir
836
+ qui gen `temp_cir' = .
837
+ tempvar temp_group
838
+ qui gen `temp_group' = .
839
+
840
+ }
841
+
842
+ // MATA
843
+ mata{
844
+ ng = `plot_n_l' + `plot_n_r'
845
+ if (`plot' == 1) {
846
+ // generate grid
847
+ if ("`plot_grid'" == "es") {
848
+ grid = ( rangen(`plot_range_l', `c' - ( (`c' - `plot_range_l') / (`plot_n_l' - 1) ), `plot_n_l' - 1) \ `c' \ `c' \ rangen(`c' + ( (`plot_range_r' - `c') / (`plot_n_r' - 1) ), `plot_range_r', `plot_n_r' - 1) )
849
+ } else {
850
+ x = st_data(., "`x'", "`touse'")
851
+ temp1 = mean(x :<= `plot_range_l')
852
+ temp2 = mean(x :<= `c')
853
+ temp3 = mean(x :<= `plot_range_r')
854
+ grid = ( rangen(temp1, temp2 - ( (temp2 - temp1) / (`plot_n_l' - 1) ), `plot_n_l' - 1) \ temp2 \ temp2 \ rangen(temp2 + ( (temp3 - temp2) / (`plot_n_r' - 1) ), temp3, `plot_n_r' - 1) )
855
+ for (j=1; j<=length(grid); j++) {
856
+ grid[j] = rddensity_quantile(x, grid[j])
857
+ }
858
+ grid[`plot_n_l'] = `c'
859
+ grid[`plot_n_l' + 1] = `c'
860
+ }
861
+
862
+ // generate group
863
+ group = ( J(`plot_n_l', 1, 0) \ J(`plot_n_r', 1, 1) )
864
+ // generate bandwidth
865
+ bw = ( J(`plot_n_l', 1, `h_l') \ J(`plot_n_r', 1, `h_r') )
866
+
867
+ st_store((1..ng)', "`temp_grid'", grid)
868
+ st_store((1..ng)', "`temp_group'", group)
869
+ st_store((1..ng)', "`temp_bw'", bw)
870
+ }
871
+ }
872
+
873
+ if (`plot' == 1) {
874
+ local scale_l = (`N_l' - 1) / (`N_l' + `N_r' - 1)
875
+ local scale_r = (`N_r' - 1) / (`N_l' + `N_r' - 1)
876
+
877
+ // left estimation
878
+ tempvar temp_grid_l
879
+ qui gen `temp_grid_l' = `temp_grid' if `temp_group' == 0
880
+ tempvar temp_bw_l
881
+ qui gen `temp_bw_l' = `temp_bw' if `temp_group' == 0
882
+
883
+ // bandwidth selection
884
+ if ("`plot_bwselect'" == "") {
885
+ local plot_bwselect_l = `"bw(`temp_bw_l')"'
886
+ }
887
+ else {
888
+ local plot_bwselect_l = `"bwselect(`plot_bwselect')"'
889
+ }
890
+
891
+ // uniform confidence band
892
+ if ("`plot_ciuniform'" != "") {
893
+ local plot_ciuniform = `"ciuniform cisimul(`plot_cisimul')"'
894
+ }
895
+ else {
896
+ local plot_ciuniform = ""
897
+ }
898
+
899
+ capture lpdensity `x' if `touse' & `x' <= `c', ///
900
+ grid(`temp_grid_l') `plot_bwselect_l' p(`p') q(`q') v(1) kernel(`kernel') scale(`scale_l') level(`level') ///
901
+ `regularize' `masspoints' nlocalmin(`nlocalmin') nuniquemin(`nuniquemin') ///
902
+ `plot_ciuniform'
903
+ if (_rc != 0) {
904
+ di as error `"{err}{cmd:lpdensity} failed. Please try to install the latest version using"'
905
+ di as error `"{err}net install lpdensity, from(https://raw.githubusercontent.com/nppackages/lpdensity/master/stata) replace"'
906
+ di as error `"{err}If error persists, please contact the authors."'
907
+ di as error `"{err}{cmd:lpdensity} error message:"'
908
+ lpdensity `x' if `touse' & `x' <= `c', ///
909
+ grid(`temp_grid_l') `plot_bwselect_l' p(`p') q(`q') v(1) kernel(`kernel') scale(`scale_l') level(`level') ///
910
+ `regularize' `masspoints' nlocalmin(`nlocalmin') nuniquemin(`nuniquemin') ///
911
+ `plot_ciuniform'
912
+ exit 111
913
+ }
914
+ }
915
+
916
+ mata{
917
+ if (`plot' == 1) {
918
+ left = st_matrix("e(result)")
919
+ st_store((1..`plot_n_l')', "`temp_bw'", left[., 2])
920
+ st_store((1..`plot_n_l')', "`temp_f'", left[., 4])
921
+ st_store((1..`plot_n_l')', "`temp_cil'", left[., 8])
922
+ st_store((1..`plot_n_l')', "`temp_cir'", left[., 9])
923
+ }
924
+ }
925
+
926
+ if (`plot' == 1) {
927
+ // right estimation
928
+ tempvar temp_grid_r
929
+ qui gen `temp_grid_r' = `temp_grid' if `temp_group' == 1
930
+ tempvar temp_bw_r
931
+ qui gen `temp_bw_r' = `temp_bw' if `temp_group' == 1
932
+
933
+ if ("`plot_bwselect'" == "") {
934
+ local plot_bwselect_r = `"bw(`temp_bw_r')"'
935
+ }
936
+ else {
937
+ local plot_bwselect_r = `"bwselect(`plot_bwselect')"'
938
+ }
939
+
940
+ capture lpdensity `x' if `touse' & `x' >= `c', ///
941
+ grid(`temp_grid_r') `plot_bwselect_r' p(`p') q(`q') v(1) kernel(`kernel') scale(`scale_r') level(`level') ///
942
+ `regularize' `masspoints' nlocalmin(`nlocalmin') nuniquemin(`nuniquemin') ///
943
+ `plot_ciuniform'
944
+ if (_rc != 0) {
945
+ di as error `"{err}{cmd:lpdensity} failed. Please try to install the latest version using"'
946
+ di as error `"{err}net install lpdensity, from(https://raw.githubusercontent.com/nppackages/lpdensity/master/stata) replace"'
947
+ di as error `"{err}If error persists, please contact the authors."'
948
+ di as error `"{err}{cmd:lpdensity} error message:"'
949
+ lpdensity `x' if `touse' & `x' >= `c', ///
950
+ grid(`temp_grid_r') `plot_bwselect_r' p(`p') q(`q') v(1) kernel(`kernel') scale(`scale_r') level(`level') ///
951
+ `regularize' `masspoints' nlocalmin(`nlocalmin') nuniquemin(`nuniquemin') ///
952
+ `plot_ciuniform'
953
+ exit 111
954
+ }
955
+ }
956
+
957
+ mata{
958
+ if (`plot' == 1) {
959
+ right = st_matrix("e(result)")
960
+ st_store(((`plot_n_l'+1)..(`plot_n_l'+`plot_n_r'))', "`temp_bw'", right[., 2])
961
+ st_store(((`plot_n_l'+1)..(`plot_n_l'+`plot_n_r'))', "`temp_f'", right[., 4])
962
+ st_store(((`plot_n_l'+1)..(`plot_n_l'+`plot_n_r'))', "`temp_cil'", right[., 8])
963
+ st_store(((`plot_n_l'+1)..(`plot_n_l'+`plot_n_r'))', "`temp_cir'", right[., 9])
964
+ }
965
+ }
966
+
967
+ if ("`genvars'" != "" & `plot' == 1) {
968
+ qui gen `genvars'_grid = `temp_grid'
969
+ qui gen `genvars'_bw = `temp_bw'
970
+ qui gen `genvars'_f = `temp_f'
971
+ qui gen `genvars'_cil = `temp_cil'
972
+ qui gen `genvars'_cir = `temp_cir'
973
+ qui gen `genvars'_group = `temp_group'
974
+ label variable `genvars'_grid "rddensity plot: grid"
975
+ label variable `genvars'_bw "rddensity plot: bandwidth"
976
+ label variable `genvars'_f "rddensity plot: point estimate"
977
+ label variable `genvars'_cil "rddensity plot: `level'% CI, left"
978
+ label variable `genvars'_cir "rddensity plot: `level'% CI, right"
979
+ label variable `genvars'_group "rddensity plot: =1 if grid >= `c'"
980
+ }
981
+
982
+
983
+ ****************************************************************************
984
+ *** DEFAULT OPTIONS: HISTOGRAM *********************************************
985
+
986
+ // hist_range
987
+ tokenize `hist_range'
988
+ local w : word count `hist_range'
989
+ if `w' == 0 {
990
+ qui sum `x'
991
+ if (`c' - 3 * `h_l' < r(min)) {
992
+ local hist_range_l = r(min)
993
+ }
994
+ else {
995
+ local hist_range_l = `c' - 3 * `h_l'
996
+ }
997
+ if (`c' + 3 * `h_r' > r(max)) {
998
+ local hist_range_r = r(max)
999
+ }
1000
+ else {
1001
+ local hist_range_r = `c' + 3 * `h_r'
1002
+ }
1003
+ }
1004
+ if `w' == 1 {
1005
+ di as error "{err}{cmd:hist_range()} takes two inputs."
1006
+ exit 125
1007
+ }
1008
+ if `w' == 2 {
1009
+ local hist_range_l `"`1'"'
1010
+ local hist_range_r `"`2'"'
1011
+ }
1012
+ if `w' >= 3 {
1013
+ di as error "{err}{cmd:hist_range()} takes two inputs."
1014
+ exit 125
1015
+ }
1016
+
1017
+ // hist_n
1018
+ tokenize `hist_n'
1019
+ local w : word count `hist_n'
1020
+ if `w' == 0 {
1021
+ // check if hist_width is provided
1022
+ if ("`hist_width'" == "") {
1023
+ // do shonething
1024
+ qui count if `x' < `c' & `x' >= `hist_range_l'
1025
+ local hist_n_l = ceil(min( sqrt(r(N)) , 10 * log(r(N)) / log(10) ))
1026
+ qui count if `x' >= `c' & `x' <= `hist_range_r'
1027
+ local hist_n_r = ceil(min( sqrt(r(N)) , 10 * log(r(N)) / log(10) ))
1028
+ }
1029
+ else {
1030
+ // do nothing. wait until hist_width
1031
+ }
1032
+
1033
+ }
1034
+ if `w' == 1 {
1035
+ local hist_n_l `"`1'"'
1036
+ local hist_n_r `"`1'"'
1037
+ if (`hist_n_l' <= 0) {
1038
+ di as err `"{err}{cmd:hist_n()}: incorrectly specified (should be a positive integer)"'
1039
+ exit 198
1040
+ }
1041
+ }
1042
+ if `w' == 2 {
1043
+ local hist_n_l `"`1'"'
1044
+ local hist_n_r `"`2'"'
1045
+ if (`hist_n_l' <= 0 | `hist_n_r' <= 0) {
1046
+ di as err `"{err}{cmd:hist_n()}: incorrectly specified (should be positive integers)"'
1047
+ exit 198
1048
+ }
1049
+ }
1050
+ if `w' >= 3 {
1051
+ di as error "{err}{cmd:hist_n()} takes at most two inputs."
1052
+ exit 125
1053
+ }
1054
+
1055
+ // hist_width
1056
+ tokenize `hist_width'
1057
+ local w : word count `hist_width'
1058
+ if `w' == 0 {
1059
+ local hist_width_l = (`c' - `hist_range_l') / `hist_n_l'
1060
+ local hist_width_r = (`hist_range_r' - `c') / `hist_n_r'
1061
+ }
1062
+ if `w' == 1 {
1063
+ if ("`hist_n'" == "") {
1064
+ // only hist_width is provided
1065
+ local hist_width_l `"`1'"'
1066
+ local hist_width_r `"`1'"'
1067
+ if (`hist_width_l' <= 0) {
1068
+ di as err `"{err}{cmd:hist_width()}: incorrectly specified (should be a positive number)"'
1069
+ exit 198
1070
+ }
1071
+ local hist_n_l = ceil((`c' - `hist_range_l') / `hist_width_l')
1072
+ local hist_n_r = ceil((`hist_range_r' - `c') / `hist_width_r')
1073
+ }
1074
+ else {
1075
+ // ignore hist_width input, because hist_n is provided
1076
+ local hist_width_l = (`c' - `hist_range_l') / `hist_n_l'
1077
+ local hist_width_r = (`hist_range_r' - `c') / `hist_n_r'
1078
+ }
1079
+ }
1080
+ if `w' == 2 {
1081
+ if ("`hist_n'" == "") {
1082
+ // only hist_width is provided
1083
+ local hist_width_l `"`1'"'
1084
+ local hist_width_r `"`2'"'
1085
+ if (`hist_width_l' <= 0 | `hist_width_r' <= 0) {
1086
+ di as err `"{err}{cmd:hist_width()}: incorrectly specified (should be positive numbers)"'
1087
+ exit 198
1088
+ }
1089
+ local hist_n_l = ceil((`c' - `hist_range_l') / `hist_width_l')
1090
+ local hist_n_r = ceil((`hist_range_r' - `c') / `hist_width_r')
1091
+ }
1092
+ else {
1093
+ // ignore hist_width input, because hist_n is provided
1094
+ local hist_width_l = (`c' - `hist_range_l') / `hist_n_l'
1095
+ local hist_width_r = (`hist_range_r' - `c') / `hist_n_r'
1096
+ }
1097
+ }
1098
+ if `w' >= 3 {
1099
+ di as error "{err}{cmd:hist_width()} takes two inputs."
1100
+ exit 125
1101
+ }
1102
+
1103
+ // histogram
1104
+ if ("`histogram'" != "") {
1105
+ local histogram = 0
1106
+ }
1107
+ else {
1108
+ local histogram = 1
1109
+ }
1110
+
1111
+ if (`histogram' == 1) {
1112
+ if (`hist_n_l' + `hist_n_r' > _N) {
1113
+ local newN = `hist_n_l' + `hist_n_r'
1114
+ set obs `newN'
1115
+ }
1116
+
1117
+ tempvar temp_hist_center
1118
+ qui gen `temp_hist_center' = .
1119
+ tempvar temp_hist_end_l
1120
+ qui gen `temp_hist_end_l' = .
1121
+ tempvar temp_hist_end_r
1122
+ qui gen `temp_hist_end_r' = .
1123
+ tempvar temp_hist_width
1124
+ qui gen `temp_hist_width' = .
1125
+ tempvar temp_hist_height
1126
+ qui gen `temp_hist_height' = .
1127
+ tempvar temp_hist_group
1128
+ qui gen `temp_hist_group' = .
1129
+ }
1130
+
1131
+ // MATA
1132
+ mata{
1133
+
1134
+ if (`histogram' == 1) {
1135
+ ng = `hist_n_l' + `hist_n_r'
1136
+ temp_hist_width = (J(`hist_n_l', 1, `hist_width_l') \ J(`hist_n_r', 1, `hist_width_r'))
1137
+ temp_hist_center = (`c' :- (((`hist_n_l'..1) :- 0.5)' :* `hist_width_l') \ `c' :+ (((1..`hist_n_r') :- 0.5)' :* `hist_width_r'))
1138
+ temp_hist_end_l = (`c' :- (((`hist_n_l'..1))' :* `hist_width_l') \ `c' :+ (((1..`hist_n_r') :- 1)' :* `hist_width_r'))
1139
+ temp_hist_end_r = (`c' :- (((`hist_n_l'..1) :- 1)' :* `hist_width_l') \ `c' :+ (((1..`hist_n_r'))' :* `hist_width_r'))
1140
+ temp_hist_group = (J(`hist_n_l', 1, 0) \ J(`hist_n_r', 1, 1))
1141
+ temp_hist_height = J(ng, 1, .)
1142
+
1143
+ x = st_data(., "`x'", "`touse'")
1144
+
1145
+ for (jj=1; jj<=ng; jj++) {
1146
+ temp_hist_height[jj] = sum(x :>= temp_hist_end_l[jj] :& x :< temp_hist_end_r[jj]) / (`N_l' + `N_r') / temp_hist_width[jj]
1147
+ }
1148
+
1149
+ st_store((1..ng)', "`temp_hist_width'", temp_hist_width)
1150
+ st_store((1..ng)', "`temp_hist_center'", temp_hist_center)
1151
+ st_store((1..ng)', "`temp_hist_end_l'", temp_hist_end_l)
1152
+ st_store((1..ng)', "`temp_hist_end_r'", temp_hist_end_r)
1153
+ st_store((1..ng)', "`temp_hist_height'", temp_hist_height)
1154
+ st_store((1..ng)', "`temp_hist_group'", temp_hist_group)
1155
+ }
1156
+ }
1157
+
1158
+ if ("`genvars'" != "" & `plot' == 1 & `histogram' == 1) {
1159
+ qui gen `genvars'_hist_width = `temp_hist_width'
1160
+ qui gen `genvars'_hist_center = `temp_hist_center'
1161
+ qui gen `genvars'_hist_height = `temp_hist_height'
1162
+ qui gen `genvars'_hist_group = `temp_hist_group'
1163
+ qui gen `genvars'_hist_endl = `temp_hist_end_l'
1164
+ qui gen `genvars'_hist_endr = `temp_hist_end_r'
1165
+ label variable `genvars'_hist_width "histogram plot: histogram bar width"
1166
+ label variable `genvars'_hist_center "histogram plot: histogram bar center"
1167
+ label variable `genvars'_hist_endl "histogram plot: histogram bar left end"
1168
+ label variable `genvars'_hist_endr "histogram plot: histogram bar right end"
1169
+ label variable `genvars'_hist_height "histogram plot: histogram bar height"
1170
+ label variable `genvars'_hist_group "histogram plot: =1 if cell center > `c'"
1171
+ }
1172
+
1173
+ ****************************************************************************
1174
+ *** PLOT *******************************************************************
1175
+
1176
+ if (`plot' == 1) {
1177
+
1178
+ // ci type check, left
1179
+ if ("`plotl_citype'" == "") {
1180
+ local plotl_citype = "region"
1181
+ }
1182
+ else if ("`plotl_citype'" != "all" & "`plotl_citype'" != "region" & "`plotl_citype'" != "line" & "`plotl_citype'" != "ebar" & "`plotl_citype'" != "none") {
1183
+ di as err `"plotl_citype(): incorrectly specified: options(region, line, ebar, all, none)"'
1184
+ exit 198
1185
+ }
1186
+
1187
+ if ("`plotl_citype'" == "region" | "`plotl_citype'" == "all") {
1188
+ if ("`cirl_opt'" == "") {
1189
+ local ci_plot_region_l = `"(rarea `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort lcolor(white%0) color(red%30))"'
1190
+ }
1191
+ else {
1192
+ local ci_plot_region_l = `"(rarea `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort `cirl_opt')"'
1193
+ }
1194
+ }
1195
+ else {
1196
+ local ci_plot_region_l = `""'
1197
+ }
1198
+ if ("`plotl_citype'" == "line" | "`plotl_citype'" == "all") {
1199
+ if ("`cill_opt'" == "") {
1200
+ local ci_plot_line_l = `"(rline `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort color(red%70))"'
1201
+ }
1202
+ else {
1203
+ local ci_plot_line_l = `"(rline `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort `cill_opt')"'
1204
+ }
1205
+ }
1206
+ else {
1207
+ local ci_plot_line_l = `""'
1208
+ }
1209
+ if ("`plotl_citype'" == "ebar" | "`plotl_citype'" == "all") {
1210
+ if ("`cibl_opt'" == "") {
1211
+ local ci_plot_ebar_l = `"(rcap `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort color(red%70))"'
1212
+ }
1213
+ else {
1214
+ local ci_plot_ebar_l = `"(rcap `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 0, sort `cibl_opt')"'
1215
+ }
1216
+ }
1217
+ else {
1218
+ local ci_plot_ebar_l = `""'
1219
+ }
1220
+
1221
+ // ci type check, right
1222
+ if ("`plotr_citype'" == "") {
1223
+ local plotr_citype = "region"
1224
+ }
1225
+ else if ("`plotr_citype'" != "all" & "`plotr_citype'" != "region" & "`plotr_citype'" != "line" & "`plotr_citype'" != "ebar" & "`plotr_citype'" != "none") {
1226
+ di as err `"plotr_citype(): incorrectly specified: options(region, line, ebar, all, none)"'
1227
+ exit 198
1228
+ }
1229
+
1230
+ if ("`plotr_citype'" == "region" | "`plotr_citype'" == "all") {
1231
+ if ("`cirr_opt'" == "") {
1232
+ local ci_plot_region_r = `"(rarea `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort lcolor(white%0) color(blue%30))"'
1233
+ }
1234
+ else {
1235
+ local ci_plot_region_r = `"(rarea `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort `cirr_opt')"'
1236
+ }
1237
+ }
1238
+ else {
1239
+ local ci_plot_region_r = `""'
1240
+ }
1241
+ if ("`plotr_citype'" == "line" | "`plotr_citype'" == "all") {
1242
+ if ("`cilr_opt'" == "") {
1243
+ local ci_plot_line_r = `"(rline `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort color(blue%70))"'
1244
+ }
1245
+ else {
1246
+ local ci_plot_line_r = `"(rline `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort `cilr_opt')"'
1247
+ }
1248
+ }
1249
+ else {
1250
+ local ci_plot_line_r = `""'
1251
+ }
1252
+ if ("`plotr_citype'" == "ebar" | "`plotr_citype'" == "all") {
1253
+ if ("`cibr_opt'" == "") {
1254
+ local ci_plot_ebar_r = `"(rcap `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort color(blue%70))"'
1255
+ }
1256
+ else {
1257
+ local ci_plot_ebar_r = `"(rcap `temp_cil' `temp_cir' `temp_grid' if `temp_group' == 1, sort `cibr_opt')"'
1258
+ }
1259
+ }
1260
+ else {
1261
+ local ci_plot_ebar_r = `""'
1262
+ }
1263
+
1264
+ // point est type check, left
1265
+
1266
+ if ("`plotl_estype'" == "") {
1267
+ local plotl_estype = "line"
1268
+ }
1269
+ else if ("`plotl_estype'" != "both" & "`plotl_estype'" != "line" & "`plotl_estype'" != "point" & "`plotl_estype'" != "none") {
1270
+ di as err `"plotl_estype(): incorrectly specified: options(line, point, both, none)"'
1271
+ exit 198
1272
+ }
1273
+
1274
+ if ("`plotl_estype'" == "line" | "`plotl_estype'" == "both") {
1275
+ if ("`esll_opt'" == "") {
1276
+ local es_plot_line_l = `"(line `temp_f' `temp_grid' if `temp_group' == 0, sort lcolor(red) lwidth("medthin") lpattern(solid))"'
1277
+ }
1278
+ else {
1279
+ local es_plot_line_l = `"(line `temp_f' `temp_grid' if `temp_group' == 0, sort `esll_opt')"'
1280
+ }
1281
+ }
1282
+ else {
1283
+ local es_plot_line_l = `""'
1284
+ }
1285
+ if ("`plotl_estype'" == "point" | "`plotl_estype'" == "both") {
1286
+ if ("`espl_opt'" == "") {
1287
+ local es_plot_point_l = `"(scatter `temp_f' `temp_grid' if `temp_group' == 0, sort color(red))"'
1288
+ }
1289
+ else {
1290
+ local es_plot_point_l = `"(scatter `temp_f' `temp_grid' if `temp_group' == 0, sort `espl_opt')"'
1291
+ }
1292
+ }
1293
+ else {
1294
+ local es_plot_point_l = `""'
1295
+ }
1296
+
1297
+ // point est type check, right
1298
+
1299
+ if ("`plotr_estype'" == "") {
1300
+ local plotr_estype = "line"
1301
+ }
1302
+ else if ("`plotr_estype'" != "both" & "`plotr_estype'" != "line" & "`plotr_estype'" != "point" & "`plotr_estype'" != "none") {
1303
+ di as err `"plotr_estype(): incorrectly specified: options(line, point, both, none)"'
1304
+ exit 198
1305
+ }
1306
+
1307
+ if ("`plotr_estype'" == "line" | "`plotr_estype'" == "both") {
1308
+ if ("`eslr_opt'" == "") {
1309
+ local es_plot_line_r = `"(line `temp_f' `temp_grid' if `temp_group' == 1, sort lcolor(blue) lwidth("medthin") lpattern(solid))"'
1310
+ }
1311
+ else {
1312
+ local es_plot_line_r = `"(line `temp_f' `temp_grid' if `temp_group' == 1, sort `eslr_opt')"'
1313
+ }
1314
+ }
1315
+ else {
1316
+ local es_plot_line_r = `""'
1317
+ }
1318
+ if ("`plotr_estype'" == "point" | "`plotr_estype'" == "both") {
1319
+ if ("`espr_opt'" == "") {
1320
+ local es_plot_point_r = `"(scatter `temp_f' `temp_grid' if `temp_group' == 1, sort color(blue))"'
1321
+ }
1322
+ else {
1323
+ local es_plot_point_r = `"(scatter `temp_f' `temp_grid' if `temp_group' == 1, sort `espr_opt')"'
1324
+ }
1325
+ }
1326
+ else {
1327
+ local es_plot_point_r = `""'
1328
+ }
1329
+
1330
+ if (`histogram' == 1) {
1331
+ if ("`histl_opt'" == "") {
1332
+ local plot_histogram_l = `"(bar `temp_hist_height' `temp_hist_center' if `temp_hist_center' < `c', barwidth(`hist_width_l') color(red%20))"'
1333
+ }
1334
+ else {
1335
+ local plot_histogram_l = `"(bar `temp_hist_height' `temp_hist_center' if `temp_hist_center' < `c', `histl_opt')"'
1336
+ }
1337
+ if ("`histr_opt'" == "") {
1338
+ local plot_histogram_r = `"(bar `temp_hist_height' `temp_hist_center' if `temp_hist_center' >= `c', barwidth(`hist_width_r') color(blue%20))"'
1339
+ }
1340
+ else {
1341
+ local plot_histogram_r = `"(bar `temp_hist_height' `temp_hist_center' if `temp_hist_center' >= `c', `histr_opt')"'
1342
+ }
1343
+ }
1344
+ else {
1345
+ local plot_histogram_l = ""
1346
+ local plot_histogram_r = ""
1347
+ }
1348
+
1349
+ // graph option check
1350
+ if (`"`graph_opt'"' == "" ) {
1351
+ local graph_opt = `"xline(`c', lcolor(black) lwidth(medthin) lpattern(solid)) legend(off) title("Manipulation Testing Plot", color(gs0)) xtitle("`x'") ytitle("")"'
1352
+ }
1353
+
1354
+ twoway `plot_histogram_l' ///
1355
+ `plot_histogram_r' ///
1356
+ `ci_plot_region_l' ///
1357
+ `ci_plot_line_l' ///
1358
+ `ci_plot_ebar_l' ///
1359
+ `ci_plot_region_r' ///
1360
+ `ci_plot_line_r' ///
1361
+ `ci_plot_ebar_r' ///
1362
+ `es_plot_line_l' ///
1363
+ `es_plot_point_l' ///
1364
+ `es_plot_line_r' ///
1365
+ `es_plot_point_r' ///
1366
+ , ///
1367
+ `graph_opt'
1368
+ }
1369
+
1370
+ ereturn clear
1371
+ ereturn scalar c = `c'
1372
+ ereturn scalar p = `p'
1373
+ ereturn scalar q = `q'
1374
+ ereturn scalar N_l = `N_l'
1375
+ ereturn scalar N_r = `N_r'
1376
+ ereturn scalar N_h_l = `N_h_l'
1377
+ ereturn scalar N_h_r = `N_h_r'
1378
+ ereturn scalar h_l = `h_l'
1379
+ ereturn scalar h_r = `h_r'
1380
+ ereturn scalar f_ql = `f_ql'
1381
+ ereturn scalar f_qr = `f_qr'
1382
+ ereturn scalar se_ql = `se_ql'
1383
+ ereturn scalar se_qr = `se_qr'
1384
+ ereturn scalar se_q = `se_q'
1385
+ ereturn scalar pv_q = `pv_q'
1386
+ ereturn scalar T_q = `T_q'
1387
+
1388
+ if ("`all'"!=""){
1389
+ ereturn scalar f_pl = `f_pl'
1390
+ ereturn scalar f_pr = `f_pr'
1391
+ ereturn scalar se_pl = `se_pl'
1392
+ ereturn scalar se_pr = `se_pr'
1393
+ ereturn scalar se_p = `se_p'
1394
+ ereturn scalar pv_p = `pv_p'
1395
+ ereturn scalar T_p = `T_p'
1396
+ }
1397
+
1398
+ ereturn local runningvar "`runningvar'"
1399
+ ereturn local kernel "`kernel'"
1400
+ ereturn local bwmethod "`bwmethod'"
1401
+ ereturn local vce "`vce'"
1402
+
1403
+ mata: mata clear
1404
+
1405
+ end
1406
+
30/replication_package/Adofiles/rd_2021/rddensity.sthlp ADDED
@@ -0,0 +1,450 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *!version 2.3 2021-02-28}{...}
3
+ {viewerjumpto "Syntax" "rddensity##syntax"}{...}
4
+ {viewerjumpto "Description" "rddensity##description"}{...}
5
+ {viewerjumpto "Options" "rddensity##options"}{...}
6
+ {viewerjumpto "Examples" "rddensity##examples"}{...}
7
+ {viewerjumpto "Saved results" "rddensity##saved_results"}{...}
8
+
9
+ {title:Title}
10
+
11
+ {p 4 8}{cmd:rddensity} {hline 2} Manipulation Testing Using Local Polynomial Density Estimation.{p_end}
12
+
13
+ {marker syntax}{...}
14
+ {title:Syntax}
15
+
16
+ {p 4 8}{cmd:rddensity} {it:Var} {ifin}
17
+ [{cmd:,}
18
+ {p_end}
19
+ {p 14 18}
20
+ {cmd:c(}{it:#}{cmd:)}
21
+ {cmd:p(}{it:#}{cmd:)}
22
+ {cmd:q(}{it:#}{cmd:)}
23
+ {cmd:fitselect(}{it:FitMethod}{cmd:)}
24
+ {cmd:kernel(}{it:KernelFn}{cmd:)}
25
+ {cmd:vce(}{it:VceMethod}{cmd:)}
26
+ {cmd:nomasspoints}
27
+ {cmd:level(}{it:#}{cmd:)}
28
+ {cmd:all}
29
+ {p_end}
30
+ {p 14 18}
31
+ {cmd:h(}{it:# #}{cmd:)}
32
+ {cmd:bwselect(}{it:BwMethod}{cmd:)}
33
+ {cmd:nlocalmin(}{it:#}{cmd:)}
34
+ {cmd:nuniquemin(}{it:#}{cmd:)}
35
+ {cmd:noregularize}
36
+ {p_end}
37
+ {p 14 18}
38
+ {cmd:bino_n(}{it:#}{cmd:)}
39
+ {cmd:bino_nstep(}{it:#}{cmd:)}
40
+ {cmd:bino_w(}{it:# #}{cmd:)}
41
+ {cmd:bino_wstep(}{it:# #}{cmd:)}
42
+ {cmd:bino_nw(}{it:#}{cmd:)}
43
+ {cmd:bino_p(}{it:#}{cmd:)}
44
+ {cmd:nobinomial}
45
+ {p_end}
46
+ {p 14 18}
47
+ {cmd:plot}
48
+ {cmd:plot_range(}{it:# #}{cmd:)}
49
+ {cmd:plot_n(}{it:# #}{cmd:)}
50
+ {cmd:plot_grid(}{it:GridMethod}{cmd:)}
51
+ {cmd:plot_bwselect(}{it:BwMethod}{cmd:)}
52
+ {p_end}
53
+ {p 14 18}
54
+ {cmd:plot_ciuniform}
55
+ {cmd:plot_cisimul(}{it:# #}{cmd:)}
56
+ {p_end}
57
+ {p 14 18}
58
+ {cmd:graph_opt(}{it:GraphOpt}{cmd:)}
59
+ {cmd:genvars(}{it:NewVarName}{cmd:)}
60
+ {p_end}
61
+ {p 14 18}
62
+ {cmd:plotl_estype(}{it:EstType}{cmd:)}
63
+ {cmd:esll_opt(}{it:LineOpt}{cmd:)}
64
+ {cmd:espl_opt(}{it:PtOpt}{cmd:)}
65
+ {p_end}
66
+ {p 14 18}
67
+ {cmd:plotr_estype(}{it:EstType}{cmd:)}
68
+ {cmd:eslr_opt(}{it:LineOpt}{cmd:)}
69
+ {cmd:espr_opt(}{it:PtOpt}{cmd:)}
70
+ {p_end}
71
+ {p 14 18}
72
+ {cmd:plotl_citype(}{it:CIType}{cmd:)}
73
+ {cmd: cirl_opt(}{it:AreaOpt}{cmd:)}
74
+ {cmd: cill_opt(}{it:LineOpt}{cmd:)}
75
+ {cmd: cibl_opt(}{it:EbarOpt}{cmd:)}
76
+ {p_end}
77
+ {p 14 18}
78
+ {cmd:plotr_citype(}{it:CIType}{cmd:)}
79
+ {cmd:cirr_opt(}{it:AreaOpt}{cmd:)}
80
+ {cmd:cilr_opt(}{it:LineOpt}{cmd:)}
81
+ {cmd:cibr_opt(}{it:EbarOpt}{cmd:)}
82
+ {p_end}
83
+ {p 14 18}
84
+ {cmd:hist_range(}{it:# #}{cmd:)}
85
+ {cmd:hist_n(}{it:# #}{cmd:)}
86
+ {cmd:hist_width(}{it:# #}{cmd:)}
87
+ {cmd:histl_opt(}{it:BarOpt}{cmd:)}
88
+ {cmd:histr_opt(}{it:BarOpt}{cmd:)}
89
+ {cmd:nohistogram}
90
+ {p_end}
91
+ {p 14 18}
92
+ ]{p_end}
93
+
94
+ {synoptset 28 tabbed}{...}
95
+
96
+ {marker description}{...}
97
+ {title:Description}
98
+
99
+ {p 4 8}{cmd:rddensity} implements manipulation testing procedures using the local polynomial density estimators proposed in
100
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2020_JASA.pdf":Cattaneo, Jansson and Ma (2020)},
101
+ and implements graphical procedures with valid confidence bands using the results in
102
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2021_JoE.pdf":Cattaneo, Jansson and Ma (2021)}.
103
+ In addition, the command provides complementary manipulation testing based on finite sample exact binomial testing following the results in
104
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Cattaneo, Frandsen and Titiunik (2015)}
105
+ and
106
+ {browse "https://rdpackages.github.io/references/Cattaneo-Titiunik-VazquezBare_2017_JPAM.pdf":Cattaneo, Titiunik and Vazquez-Bare (2017)}.
107
+ For an introduction to manipulation testing see McCrary (2008).{p_end}
108
+
109
+ {p 4 8}A detailed introduction to this Stata command is given in {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2018_Stata.pdf":Cattaneo, Jansson and Ma (2018)}.{p_end}
110
+ {p 8 8}Companion {browse "www.r-project.org":R} functions are also available {browse "https://rdpackages.github.io/rddensity":here}.{p_end}
111
+
112
+ {p 4 8}Companion function is {help rdbwdensity:rdbwdensity}.
113
+ For graphical procedures, the
114
+ {browse "https://nppackages.github.io/lpdensity":lpdensity}
115
+ package is required.{p_end}
116
+
117
+ {p 4 8}Related Stata and R packages useful for inference in regression discontinuity (RD) designs are described in the following website:{p_end}
118
+
119
+ {p 8 8}{browse "https://rdpackages.github.io/":https://rdpackages.github.io/}{p_end}
120
+
121
+ {marker options}{...}
122
+ {title:Options}
123
+
124
+ {dlgtab:Density Estimation}
125
+
126
+ {p 4 8}{opt c:}{cmd:(}{it:#}{cmd:)} specifies the threshold or cutoff value in the support of {it:Var}, which determines the two samples (e.g., control and treatment units in RD settings).
127
+ Default is {cmd:c(0)}.{p_end}
128
+
129
+ {p 4 8}{opt p:}{cmd:(}{it:#}{cmd:)} specifies the local polynomial order used to construct the density estimators.
130
+ Default is {cmd:p(2)} (local quadratic approximation).{p_end}
131
+
132
+ {p 4 8}{opt q:}{cmd:(}{it:#}{cmd:)} specifies the local polynomial order used to construct the bias-corrected density estimators.
133
+ Default is {cmd:q(p(}{it:#}{cmd:)+1)} (local cubic approximation for default {cmd:p(2)}).{p_end}
134
+
135
+ {p 4 8}{opt fit:select}{cmd:(}{it:FitMethod}{cmd:)} specifies the density estimation method.{p_end}
136
+ {p 8 12}{opt unrestricted}{bind:} for density estimation without any restrictions (two-sample, unrestricted inference).
137
+ This is the default option.{p_end}
138
+ {p 8 12}{opt restricted}{bind: } for density estimation assuming equal distribution function and higher-order derivatives.{p_end}
139
+
140
+ {p 4 8}{opt ker:nel}{cmd:(}{it:KernelFn}{cmd:)} specifies the kernel function used to construct the local polynomial estimators.{p_end}
141
+ {p 8 12}{opt triangular}{bind: } {it:K(u) = (1 - |u|) * (|u|<=1)}.
142
+ This is the default option.{p_end}
143
+ {p 8 12}{opt epanechnikov}{bind:} {it:K(u) = 0.75 * (1 - u^2) * (|u|<=1)}.{p_end}
144
+ {p 8 12}{opt uniform}{bind: } {it:K(u) = 0.5 * (|u|<=1)}.{p_end}
145
+
146
+ {p 4 8}{opt vce:}{cmd:(}{it:VceMethod}{cmd:)} specifies the procedure used to compute the variance-covariance matrix estimator.{p_end}
147
+ {p 8 12}{opt plugin}{bind: } for asymptotic plug-in standard errors.{p_end}
148
+ {p 8 12}{opt jackknife}{bind:} for jackknife standard errors.
149
+ This is the default option.{p_end}
150
+
151
+ {p 4 8}{opt nomass:points} will not adjust for mass points in the data.{p_end}
152
+
153
+ {p 4 8}{opt lev:el}{cmd:(}{it:#}{cmd:)} specifies the level of the confidence interval, which should be between 0 and 100.
154
+ Default is {cmd:level(95)}.{p_end}
155
+
156
+ {p 4 8}{opt all} if specified, {cmd:rddensity} reports two testing procedures:{p_end}
157
+ {p 8 12}Conventional test statistic (not valid when using MSE-optimal bandwidth choice).{p_end}
158
+ {p 8 12}Robust bias-corrected statistic.
159
+ This is the default option.{p_end}
160
+
161
+
162
+ {dlgtab:Bandwidth Selection}
163
+
164
+ {p 4 8}{opt h:}{cmd:(}{it:#} {it:#}{cmd:)} specifies the bandwidth ({it:h}) used to construct the density estimators on the two sides of the cutoff.
165
+ If not specified, the bandwidth {it:h} is computed by the companion command
166
+ {help rdbwdensity:rdbwdensity}.
167
+ If two bandwidths are specified, the first bandwidth is used for the data below the cutoff and the second bandwidth is used for the data above the cutoff.{p_end}
168
+
169
+ {p 4 8}{opt bw:select}{cmd:(}{it:BwMethod}{cmd:)} specifies the bandwidth selection procedure to be used.{p_end}
170
+ {p 8 12}{opt each}{bind:} based on MSE of each density estimator separately (two distinct bandwidths, {it:hl} and {it:hr}).{p_end}
171
+ {p 8 12}{opt diff}{bind:} based on MSE of difference of two density estimators (one common bandwidth, {it:hl}={it:hr}).{p_end}
172
+ {p 8 12}{opt sum}{bind: } based on MSE of sum of two density estimators (one common bandwidth, {it:hl}={it:hr}).{p_end}
173
+ {p 8 12}{opt comb}{bind:} bandwidth is selected as a combination of the alternatives above.
174
+ This is the default option.{p_end}
175
+ {p 13 17}For {cmd:fitselect(}{opt unrestricted}{cmd:)}, it selects median({opt each},{opt diff},{opt sum}).{p_end}
176
+ {p 13 17}For {cmd:fitselect(}{opt restricted}{cmd:)}, it selects min({opt diff},{opt sum}).{p_end}
177
+
178
+ {p 4 8}{opt nloc:almin}{cmd:(}{it:#}{cmd:)} specifies the minimum number of observations in each local neighborhood.
179
+ This option will be ignored if set to 0, or if {cmd:noregularize} is used.
180
+ Default is {cmd:20+p(}{it:#}{cmd:)+1}.{p_end}
181
+
182
+ {p 4 8}{opt nuni:quemin}{cmd:(}{it:#}{cmd:)} specifies the minimum number of unique observations in each local neighborhood.
183
+ This option will be ignored if set to 0, or if {cmd:noregularize} is used.
184
+ Default is {cmd:20+p(}{it:#}{cmd:)+1}.{p_end}
185
+
186
+ {p 4 8}{opt noreg:ularize} suppresses local sample size checking.{p_end}
187
+
188
+
189
+ {dlgtab:Binomial Test}
190
+
191
+ {p 4 8}{opt bino_w:}{cmd:(}{it:# #}{cmd:)} specifies the half length(s) of the initial window.
192
+ If two values are provided, they will be used for the data below and above the cutoff separately.{p_end}
193
+
194
+ {p 4 8}{opt bino_n:}{cmd:(}{it:#}{cmd:)} specifies the sample size in the initial window.
195
+ This option will be ignored if {opt bino_w:}{cmd:(}{it:# #}{cmd:)} is provided.{p_end}
196
+
197
+ {p 4 8}{opt bino_wstep:}{cmd:(}{it:# #}{cmd:)} specifies the increment in half length(s).{p_end}
198
+
199
+ {p 4 8}{opt bino_nstep:}{cmd:(}{it:#}{cmd:)} specifies the increment in sample size.
200
+ This option will be ignored if {opt bino_wstep:}{cmd:(}{it:# #}{cmd:)} is provided.{p_end}
201
+
202
+ {p 4 8}{opt bino_nw:}{cmd:(}{it:#}{cmd:)} specifies the total number of windows.
203
+ Default is {cmd:10}.{p_end}
204
+
205
+ {p 4 8}{opt bino_p}{cmd:(}{it:#}{cmd:)} specifies the null hypothesis of the binomial test.
206
+ Default is 0.5.{p_end}
207
+
208
+ {p 4 8}{opt nobino:mial} suppresses the binomial test.
209
+ By default, the initial (smallest) window contains 20 observations, and its length is also used as the increment for subsequent windows.{p_end}
210
+
211
+
212
+ {dlgtab:Plotting}
213
+
214
+ {p 4 8}{opt pl:ot} if specified, {cmd:rddensity} plots density estimates and confidence intervals/bands around the cutoff (this feature depends on a companion package {help lpdensity:lpdensity}).
215
+ Note that additional estimation (computing time) is needed.{p_end}
216
+
217
+ {p 4 8}{opt plot_range}{cmd:(}{it:#} {it:#}{cmd:)} specifies the lower and upper bound of the plotting region.
218
+ Default is {it:[c-3*hl,c+3*hr]} (three bandwidths around the cutoff).{p_end}
219
+
220
+ {p 4 8}{opt plot_n}{cmd:(}{it:#} {it:#}{cmd:)} specifies the number of grid points used for plotting on the two sides of the cutoff.
221
+ Default is {cmd:plot_n(10 10)} (i.e., 10 points are used on each side).{p_end}
222
+
223
+ {p 4 8}{opt plot_grid}{cmd:(}{it:GridMethod}{cmd:)} specifies how the grid points are positioned.
224
+ Options are {opt es} (evenly spaced) and {opt qs} (quantile spaced).{p_end}
225
+
226
+ {p 4 8}{opt plot_bwselect}{cmd:(}{it:BwMethod}{cmd:)} specifies the method for data-driven bandwidth selection.
227
+ Options are {cmd:mse-dpi}, {cmd:imse-dpi}, {cmd:mse-rot}, and {cmd:imse-rot}.
228
+ See {help lpdensity:lpdensity} for additional details.
229
+ If this option is omitted, the same bandwidth(s) used for manipulation testing will be employed.{p_end}
230
+
231
+ {p 4 8}{opt plot_ciuniform} plots uniform confidence bands instead of pointwise confidence intervals.
232
+ The companion option, {opt plot_cisimul}({it:#}), specifies the number of simulations used to construct critical values.
233
+ Default is 2000.{p_end}
234
+
235
+ {p 4 8}{opt graph_opt}({it:GraphOpt}) specifies additional options for plotting, such as legends and labels.{p_end}
236
+
237
+ {p 4 8}{opt genv:ars}({it:NewVarName}) specifies if new variables should be generated to store estimation results.{p_end}
238
+
239
+ {p 4 8}{bf: Remark}. Bias correction is only used for the construction of confidence intervals/bands, but not for point estimation. The point estimates, denoted by f_p, are constructed using local polynomial estimates of order
240
+ {cmd:p(}{it:#}{cmd:)},
241
+ while the centering of the confidence intervals/bands, denoted by f_q, are constructed using local polynomial estimates of order
242
+ {cmd:q(}{it:#}{cmd:)}.
243
+ The confidence intervals/bands take the form:
244
+ [f_q - cv * SE(f_q) , f_q + cv * SE(f_q)],
245
+ where cv denotes the appropriate critical value and SE(f_q) denotes a standard error estimate for the centering of the confidence interval/band.
246
+ As a result, the confidence intervals/bands may not be centered at the point estimates because they have been bias-corrected. Setting
247
+ {cmd:q(}{it:#}{cmd:)}
248
+ and
249
+ {cmd:p(}{it:#}{cmd:)}
250
+ to be equal results in confidence intervals/bands centered at the point estimate, but requires undersmoothing for valid inference (i.e., (I)MSE-optimal bandwidth for the density point estimator cannot be used).
251
+ Hence the bandwidth would need to be specified manually when
252
+ {cmd:q(}{it:#}{cmd:)} = {cmd:p(}{it:#}{cmd:)},
253
+ and the point estimates will not be (I)MSE optimal. See Cattaneo, Jansson and Ma
254
+ ({browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2021_JoE.pdf":2021a}, {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2021_JSS.pdf":2021b})
255
+ for details, and also Calonico, Cattaneo, and Farrell
256
+ ({browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":2018},
257
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_CEopt.pdf":2020})
258
+ for robust bias correction methods.{p_end}
259
+
260
+ {p 8 8} Sometimes the density point estimates may lie outside of the confidence intervals/bands, which can happen if the underlying distribution exhibits high curvature at some evaluation point(s).
261
+ One possible solution in this case is to increase the polynomial order {cmd:p(}{it:#}{cmd:)} or to employ a smaller bandwidth.{p_end}
262
+
263
+
264
+ {dlgtab:Additional Plotting Options: Histogram}
265
+
266
+ {p 4 8}{opt hist_range}{cmd:(}{it:#} {it:#}{cmd:)} specifies the lower and upper bound of the histogram plot.
267
+ Default is {it:[c-3*hl,c+3*hr]} (three bandwidths around the cutoff).{p_end}
268
+
269
+ {p 4 8}{opt hist_n}{cmd:(}{it:#} {it:#}{cmd:)} specifies the number of histogram bars.
270
+ Default is {it:min[sqrt(N),10*log(N)/log(10)]}, where {it:N} is the number of observations within the range specified by {opt hist_range}{cmd:(}{it:#} {it:#}{cmd:)}.{p_end}
271
+
272
+ {p 4 8}{opt hist_width}{cmd:(}{it:#} {it:#}{cmd:)} specifies the width of histogram bars.
273
+ This option will be ignored if {opt hist_n}{cmd:(}{it:#} {it:#}{cmd:)} is provided.{p_end}
274
+
275
+ {p 4 8}{opt nohist:ogram} suppresses the histogram in the background of the plot.{p_end}
276
+
277
+
278
+ {dlgtab:Additional Plotting Options: Below the Cutoff}
279
+
280
+ {p 4 8}{opt plotl_estype}{cmd:(}{it:EstType}{cmd:)} specifies the plotting style of point estimates.{p_end}
281
+ {p 8 12}{opt line}{bind: } a curve.
282
+ This is the default option.{p_end}
283
+ {p 8 12}{opt points}{bind:} individual points.{p_end}
284
+ {p 8 12}{opt both}{bind: } both of the above.{p_end}
285
+ {p 8 12}{opt none}{bind: } will not plot point estimates.{p_end}
286
+
287
+ {p 4 8}{opt esll_opt}{cmd:(}{it:LineOpt}{cmd:)}{bind:} specifies additional {cmd:twoway line}{bind: } options for plotting point estimates.{p_end}
288
+
289
+ {p 4 8}{opt espl_opt}{cmd:(}{it:PtOpt}{cmd:)}{bind: } specifies additional {cmd:twoway scatter}{bind:} options for plotting point estimates.{p_end}
290
+
291
+ {p 4 8}{opt plotl_citype}{cmd:(}{it:CIType}{cmd:)} specifies the plotting style of confidence intervals/bands.{p_end}
292
+ {p 8 12}{opt region}{bind:} shaded region.
293
+ This is the default option.{p_end}
294
+ {p 8 12}{opt line}{bind: } upper and lower bounds.{p_end}
295
+ {p 8 12}{opt ebar}{bind: } error bars.{p_end}
296
+ {p 8 12}{opt all}{bind: } all of the above.{p_end}
297
+ {p 8 12}{opt none}{bind: } will not plot confidence intervals/bands.{p_end}
298
+
299
+ {p 4 8}{opt cirl_opt}{cmd:(}{it:AreaOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rarea}{bind:} options for plotting confidence intervals/regions.{p_end}
300
+
301
+ {p 4 8}{opt cill_opt}{cmd:(}{it:LineOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rline}{bind:} options for plotting confidence intervals/regions.{p_end}
302
+
303
+ {p 4 8}{opt cibl_opt}{cmd:(}{it:EbarOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rcap}{bind:} options for plotting confidence intervals/regions.{p_end}
304
+
305
+ {p 4 8}{opt histl_opt}{cmd:(}{it:BarOpt}{cmd:)}{bind:} specifies additional {cmd:twoway bar}{bind:} options for histogram.{p_end}
306
+
307
+
308
+ {dlgtab:Additional Plotting Options: Above the Cutoff}
309
+
310
+ {p 4 8}{opt plotr_estype}{cmd:(}{it:EstType}{cmd:)} specifies the plotting style of point estimates.{p_end}
311
+ {p 8 12}{opt line}{bind: } a curve.
312
+ This is the default option.{p_end}
313
+ {p 8 12}{opt points}{bind:} individual points.{p_end}
314
+ {p 8 12}{opt both}{bind: } both of the above.{p_end}
315
+ {p 8 12}{opt none}{bind: } will not plot point estimates.{p_end}
316
+
317
+ {p 4 8}{opt eslr_opt}{cmd:(}{it:LineOpt}{cmd:)}{bind:} specifies additional {cmd:twoway line}{bind:} options for plotting point estimates.{p_end}
318
+
319
+ {p 4 8}{opt espr_opt}{cmd:(}{it:PtOpt}{cmd:)}{bind:} specifies additional {cmd:twoway scatter}{bind:} options for plotting point estimates.{p_end}
320
+
321
+ {p 4 8}{opt plotr_citype}{cmd:(}{it:CIType}{cmd:)} specifies the plotting style of confidence intervals/bands.{p_end}
322
+ {p 8 12}{opt region}{bind:} shaded region.
323
+ This is the default option.{p_end}
324
+ {p 8 12}{opt line}{bind: } upper and lower bounds.{p_end}
325
+ {p 8 12}{opt ebar}{bind: } error bars.{p_end}
326
+ {p 8 12}{opt all}{bind: } all of the above.{p_end}
327
+ {p 8 12}{opt none}{bind: } will not plot confidence intervals/bands.{p_end}
328
+
329
+ {p 4 8}{opt cirr_opt}{cmd:(}{it:AreaOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rarea}{bind:} options for plotting confidence intervals/regions.{p_end}
330
+
331
+ {p 4 8}{opt cilr_opt}{cmd:(}{it:LineOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rline}{bind:} options for plotting confidence intervals/regions.{p_end}
332
+
333
+ {p 4 8}{opt cibr_opt}{cmd:(}{it:EbarOpt}{cmd:)}{bind:} specifies additional {cmd:twoway rcap}{bind:} options for plotting confidence intervals/regions.{p_end}
334
+
335
+ {p 4 8}{opt histr_opt}{cmd:(}{it:BarOpt}{cmd:)}{bind:} specifies additional {cmd:twoway bar}{bind:} options for histogram.{p_end}
336
+
337
+
338
+ {marker examples}{...}
339
+ {title:Example: Cattaneo, Frandsen and Titiunik (2015) Incumbency Data}.
340
+
341
+ {p 4 8}Load dataset (cutoff is 0 in this dataset):{p_end}
342
+ {p 8 8}{cmd:. use rddensity_senate.dta}{p_end}
343
+
344
+ {p 4 8}Manipulation test using default options: {p_end}
345
+ {p 8 8}{cmd:. rddensity margin}{p_end}
346
+
347
+ {p 4 8}Reporting both conventional and robust bias-corrected statistics:{p_end}
348
+ {p 8 8}{cmd:. rddensity margin, all}{p_end}
349
+
350
+ {p 4 8}Manipulation test using manual bandwidths choices and plug-in standard errors:{p_end}
351
+ {p 8 8}{cmd:. rddensity margin, h(10 20) vce(plugin)}{p_end}
352
+
353
+ {p 4 8}Plot density and save results to variables:{p_end}
354
+ {p 8 8}{cmd:. capture drop temp_*}{p_end}
355
+ {p 8 8}{cmd:. rddensity margin, pl plot_range(-50 50) plot_n(100 100) genvars(temp) }{p_end}
356
+
357
+
358
+ {marker saved_results}{...}
359
+ {title:Saved results}
360
+
361
+ {p 4 8}{cmd:rddensity} saves the following in {cmd:e()}:
362
+
363
+ {synoptset 20 tabbed}{...}
364
+ {p2col 5 20 24 2: Macros}{p_end}
365
+ {synopt:{cmd:e(c)}}cutoff value{p_end}
366
+ {synopt:{cmd:e(p)}}order of the polynomial used for density estimation{p_end}
367
+ {synopt:{cmd:e(q)}}order of the polynomial used for bias-correction estimation{p_end}
368
+
369
+ {synopt:{cmd:e(N_l)}}sample size to the left of the cutoff{p_end}
370
+ {synopt:{cmd:e(N_r)}}sample size to the right of the cutoff{p_end}
371
+ {synopt:{cmd:e(N_h_l)}}effective sample size (within bandwidth) to the left of the cutoff{p_end}
372
+ {synopt:{cmd:e(N_h_r)}}effective sample size (within bandwidth) to the right of the cutoff{p_end}
373
+ {synopt:{cmd:e(h_l)}}bandwidth used to the left of the cutoff{p_end}
374
+ {synopt:{cmd:e(h_r)}}bandwidth used to the right of the cutoff{p_end}
375
+
376
+ {synopt:{cmd:e(f_ql)}}bias-corrected density estimate to the left of the cutoff{p_end}
377
+ {synopt:{cmd:e(f_qr)}}bias-corrected density estimate to the right of the cutoff{p_end}
378
+ {synopt:{cmd:e(se_ql)}}standard error for bias-corrected density estimate to the left of the cutoff{p_end}
379
+ {synopt:{cmd:e(se_qr)}}standard error for bias-corrected density estimate to the right of the cutoff{p_end}
380
+ {synopt:{cmd:e(se_q)}}standard error for bias-corrected density test{p_end}
381
+ {synopt:{cmd:e(T_q)}}bias-corrected t-statistic{p_end}
382
+ {synopt:{cmd:e(pv_q)}}p-value for bias-corrected density test{p_end}
383
+
384
+ {synopt:{cmd:e(runningvar)}}running variable used{p_end}
385
+ {synopt:{cmd:e(kernel)}}kernel used{p_end}
386
+ {synopt:{cmd:e(fitmethod)}}model used{p_end}
387
+ {synopt:{cmd:e(bwmethod)}}bandwidth selection method used{p_end}
388
+ {synopt:{cmd:e(vce)}}standard errors estimator used{p_end}
389
+
390
+ {p2col 5 20 24 2: Only available if {cmd:all} is specified:}{p_end}
391
+ {synopt:{cmd:e(f_pl)}}density estimate to the left of the cutoff without bias correction {p_end}
392
+ {synopt:{cmd:e(f_pr)}}density estimate to the right of the cutoff without bias correction{p_end}
393
+ {synopt:{cmd:e(se_pl)}}standard error for density estimate to the left of the cutoff without bias correction{p_end}
394
+ {synopt:{cmd:e(se_pr)}}standard error for density estimate to the right of the cutoff without bias correction{p_end}
395
+ {synopt:{cmd:e(se_p)}}standard error for density test without bias correction{p_end}
396
+ {synopt:{cmd:e(T_p)}}t-statistic without bias correction{p_end}
397
+ {synopt:{cmd:e(pv_p)}}p-value for density test without bias correction{p_end}
398
+
399
+
400
+ {title:References}
401
+
402
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2018.
403
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":On the Effect of Bias Estimation on Coverage Accuracy in Nonparametric Inference}.{p_end}
404
+ {p 8 8}{it:Journal of the American Statistical Association} 113(522): 767-779.{p_end}
405
+
406
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2020.
407
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_CEopt.pdf":Coverage Error Optimal Confidence Intervals for Local Polynomial Regression}.{p_end}
408
+ {p 8 8}Working paper.{p_end}
409
+
410
+ {p 4 8}Cattaneo, M. D., B. Frandsen, and R. Titiunik. 2015.
411
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Randomization Inference in the Regression Discontinuity Design: An Application to the Study of Party Advantages in the U.S. Senate}.{p_end}
412
+ {p 8 8}{it:Journal of Causal Inference} 3(1): 1-24.{p_end}
413
+
414
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2018.
415
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2018_Stata.pdf": Manipulation Testing based on Density Discontinuity}.{p_end}
416
+ {p 8 8}{it:Stata Journal} 18(1): 234-261.{p_end}
417
+
418
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2020.
419
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2020_JASA.pdf":Simple Local Polynomial Density Estimators}.{p_end}
420
+ {p 8 8}{it:Journal of the American Statistical Association} 115(531): 1449-1455.{p_end}
421
+
422
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2021a.
423
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2021_JoE.pdf":Local Regression Distribution Estimators}.{p_end}
424
+ {p 8 8}{it:Journal of Econometrics}, forthcoming.{p_end}
425
+
426
+ {p 4 8}Cattaneo, M. D., M. Jansson, and X. Ma. 2021b.
427
+ {browse "https://rdpackages.github.io/references/Cattaneo-Jansson-Ma_2021_JSS.pdf":lpdensity: Local Polynomial Density Estimation and Inference}.{p_end}
428
+ {p 8 8}{it:Journal of Statistical Software}, forthcoming.{p_end}
429
+
430
+ {p 4 8}Cattaneo, M. D., Titiunik, R. and G. Vazquez-Bare. 2017.
431
+ {browse "https://rdpackages.github.io/references/Cattaneo-Titiunik-VazquezBare_2017_JPAM.pdf":Comparing Inference Approaches for RD Designs: A Reexamination of the Effect of Head Start on Child Mortality}.{p_end}
432
+ {p 8 8}{it:Journal of Policy Analysis and Management} 36(3): 643-681.{p_end}
433
+
434
+ {p 4 8}McCrary, J. 2008. Manipulation of the Running Variable in the Regression Discontinuity Design: A Density Test.{p_end}
435
+ {p 8 8}{it:Journal of Econometrics} 142(2): 698-714.{p_end}
436
+
437
+
438
+ {title:Authors}
439
+
440
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
441
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
442
+
443
+ {p 4 8}Michael Jansson, University of California Berkeley, Berkeley, CA.
444
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
445
+
446
+ {p 4 8}Xinwei Ma, University of California San Diego, La Jolla, CA.
447
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
448
+
449
+
450
+
30/replication_package/Adofiles/rd_2021/rddensity_fv.mo ADDED
Binary file (39.4 kB). View file
 
30/replication_package/Adofiles/rd_2021/rddensity_h.mo ADDED
Binary file (4.73 kB). View file
 
30/replication_package/Adofiles/rd_2021/rddensity_quantile.mo ADDED
Binary file (2.2 kB). View file
 
30/replication_package/Adofiles/rd_2021/rddensity_rep.mo ADDED
Binary file (2.94 kB). View file
 
30/replication_package/Adofiles/rd_2021/rddensity_unique.mo ADDED
Binary file (2.99 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdplot.ado ADDED
@@ -0,0 +1,796 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *!version 8.1.0 2021-02-22
2
+
3
+ capture program drop rdplot
4
+ program define rdplot, eclass
5
+ syntax anything [if] [, c(real 0) p(integer 4) nbins(string) covs(string) covs_eval(string) covs_drop(string) binselect(string) scale(string) kernel(string) weights(string) h(string) k(integer 4) support(string) masspoints(string) genvars hide ci(real 0) shade graph_options(string) nochecks *]
6
+
7
+ marksample touse
8
+ tokenize "`anything'"
9
+ local y `1'
10
+ local x `2'
11
+
12
+ ******************** Set BW ***************************
13
+ tokenize `h'
14
+ local w : word count `h'
15
+ if `w' == 1 {
16
+ local h_r = `"`1'"'
17
+ local h_l = `"`1'"'
18
+ }
19
+ if `w' == 2 {
20
+ local h_l `"`1'"'
21
+ local h_r `"`2'"'
22
+ }
23
+ if `w' >= 3 {
24
+ di as error "{err}{cmd:h()} accepts at most two inputs"
25
+ exit 125
26
+ }
27
+ ******************** Set scale ***************************
28
+ tokenize `scale'
29
+ local w : word count `scale'
30
+ if `w' == 1 {
31
+ local scale_r = `"`1'"'
32
+ local scale_l = `"`1'"'
33
+ }
34
+ if `w' == 2 {
35
+ local scale_l `"`1'"'
36
+ local scale_r `"`2'"'
37
+ }
38
+ if `w' >= 3 {
39
+ di as error "{err}{cmd:scale()} accepts at most two inputs"
40
+ exit 125
41
+ }
42
+ ******************** Set nbins ***************************
43
+ tokenize `nbins'
44
+ local w : word count `nbins'
45
+ if `w' == 1 {
46
+ local nbins_r = `"`1'"'
47
+ local nbins_l = `"`1'"'
48
+ }
49
+ if `w' == 2 {
50
+ local nbins_l `"`1'"'
51
+ local nbins_r `"`2'"'
52
+ }
53
+ if `w' >= 3 {
54
+ di as error "{err}{cmd:nbins()} accepts at most two inputs"
55
+ exit 125
56
+ }
57
+ ******************** Set support ***************************
58
+ tokenize `support'
59
+ local w : word count `support'
60
+ if `w' == 2 {
61
+ local support_l = `"`1'"'
62
+ local support_r = `"`2'"'
63
+ }
64
+ if (`w' != 2 & "`support'"!="") {
65
+ di as error "{err}{cmd:support()} only accepts two inputs"
66
+ exit 125
67
+ }
68
+
69
+ *****************************************
70
+ preserve
71
+ sort `x', stable
72
+ qui keep if `touse'
73
+
74
+ *****************************************************************
75
+ **** DROP MISSINGS ******************************************
76
+ *****************************************************************
77
+ qui drop if `y'==. | `x'==.
78
+ if ("`covs'"~="") {
79
+ qui ds `covs'
80
+ local covs_list = r(varlist)
81
+ local ncovs: word count `covs_list'
82
+ foreach z in `covs_list' {
83
+ qui drop if `z'==.
84
+ }
85
+ }
86
+
87
+ **** CHECK colinearity ******************************************
88
+ local covs_drop_coll = 0
89
+ if ("`covs_drop'"=="") local covs_drop = "pinv"
90
+ if ("`covs'"~="") {
91
+
92
+ if ("`covs_drop'"=="invsym") local covs_drop_coll = 1
93
+ if ("`covs_drop'"=="pinv") local covs_drop_coll = 2
94
+
95
+ qui _rmcoll `covs_list'
96
+ local nocoll_controls_cat `r(varlist)'
97
+ local nocoll_controls ""
98
+ foreach myString of local nocoll_controls_cat {
99
+ if ~strpos("`myString'", "o."){
100
+ if ~strpos("`myString'", "MYRUNVAR"){
101
+ local nocoll_controls "`nocoll_controls' `myString'"
102
+ }
103
+ }
104
+ }
105
+ local covs_new `nocoll_controls'
106
+ qui ds `covs_new', alpha
107
+ local covs_list_new = r(varlist)
108
+ local ncovs_new: word count `covs_list_new'
109
+
110
+ if (`ncovs_new'<`ncovs') {
111
+ if ("`covs_drop'"=="off") {
112
+ di as error "{err}Multicollinearity issue detected in {cmd:covs}. Please rescale and/or remove redundant covariates, or add {cmd:covs_drop} option."
113
+ exit 125
114
+ }
115
+ else {
116
+ local ncovs = "`ncovs_new'"
117
+ local covs_list = "`covs_list_new'"
118
+ *local covs_drop_coll = 1
119
+ }
120
+ }
121
+ }
122
+
123
+
124
+
125
+
126
+ **** DEFAULTS ***************************************
127
+ if ("`masspoints'"=="") local masspoints = "adjust"
128
+ if ("`covs_eval'"=="") local covs_eval = "mean"
129
+ *****************************************************************
130
+
131
+
132
+
133
+ qui su `x'
134
+ local N = r(N)
135
+ local x_min = r(min)
136
+ local x_max = r(max)
137
+ if ("`support'"!="") {
138
+ if (`support_l'<`x_min') {
139
+ local x_min = `support_l'
140
+ }
141
+ if (`support_r'>`x_max') {
142
+ local x_max = `support_r'
143
+ }
144
+ }
145
+ local range_l = abs(`c'-`x_min')
146
+ local range_r = abs(`x_max'-`c')
147
+
148
+ qui su `x' if `x'<`c', d
149
+ local n_l = r(N)
150
+
151
+ qui su `x' if `x'>=`c', d
152
+ local n_r = r(N)
153
+ local n = `n_r' + `n_l'
154
+
155
+ qui su `y' if `x'<`c'
156
+ local var_l = r(sd)
157
+
158
+ qui su `y' if `x'>=`c'
159
+ local var_r = r(sd)
160
+
161
+ if ("`h_l'"=="" & "`h_r'"=="") {
162
+ local h_l = `range_l'
163
+ local h_r = `range_r'
164
+ }
165
+ if "`kernel'"=="" local kernel = "uni"
166
+
167
+ qui count if `x'<`c' & `x'>=`c'-`h_l'
168
+ local n_h_l = r(N)
169
+ qui count if `x'>=`c' & `x'<=`c'+`h_r'
170
+ local n_h_r = r(N)
171
+
172
+ **************************** ERRORS
173
+ if ("`scale_l'"=="" & "`scale_r'"=="") {
174
+ local scale_r = 1
175
+ local scale_l = 1
176
+ }
177
+ if ("`nbins_l'"=="" & "`nbins_r'"=="") {
178
+ local nbins_r = 0
179
+ local nbins_l = 0
180
+ }
181
+
182
+ if ("`binselect'"=="") {
183
+ local binselect = "esmv"
184
+ }
185
+
186
+ if ("`nochecks'"=="") {
187
+ if (`c'<=`x_min' | `c'>=`x_max'){
188
+ di as error "{err}{cmd:c()} should be set within the range of `x'"
189
+ exit 125
190
+ }
191
+
192
+ if ("`p'"<"0" | "`nbins_l'"<"0" | "`nbins_r'"<"0"){
193
+ di as error "{err}{cmd:p()} and {cmd:nbins()} should be a positive integers"
194
+ exit 411
195
+ }
196
+
197
+ if ("`k'"<="0"){
198
+ di as error "{err}{cmd:k()} should be a positive integer"
199
+ exit 411
200
+ }
201
+
202
+ if (`n'<20){
203
+ di as error "{err}Not enough observations to perform bin calculations"
204
+ exit 2001
205
+ }
206
+ }
207
+
208
+
209
+ *******************************
210
+ ****** Start MATA *************
211
+ *******************************
212
+ mata{
213
+ n_l=`n_l'
214
+ n_r=`n_r'
215
+ p=`p'
216
+ k=`k'
217
+ n=`n'
218
+ c=`c'
219
+ x_min = `x_min'
220
+ x_max = `x_max'
221
+ h_l = strtoreal("`h_l'"); h_r = strtoreal("`h_r'")
222
+ nbins_l = strtoreal("`nbins_l'"); nbins_r = strtoreal("`nbins_r'")
223
+ scale_l = strtoreal("`scale_l'"); scale_r = strtoreal("`scale_r'")
224
+
225
+ y = st_data(.,("`y'"), 0); x = st_data(.,("`x'"), 0)
226
+ x_l = select(x,x:<c); x_r = select(x,x:>=c)
227
+ y_l = select(y,x:<c); y_r = select(y,x:>=c)
228
+
229
+ *** Mass points check ********************************************
230
+ masspoints_found = 0
231
+ if ("`masspoints'"=="check" | "`masspoints'"=="adjust") {
232
+ X_uniq_l = sort(uniqrows(x_l),-1)
233
+ X_uniq_r = uniqrows(x_r)
234
+ M_l = length(X_uniq_l)
235
+ M_r = length(X_uniq_r)
236
+ M = M_l + M_r
237
+ st_numscalar("M_l", M_l); st_numscalar("M_r", M_r)
238
+ mass_l = 1-M_l/n_l
239
+ mass_r = 1-M_r/n_r
240
+ if (mass_l>=0.1 | mass_r>=0.1){
241
+ masspoints_found = 1
242
+ display("{err}Mass points detected in the running variable.")
243
+ if ("`masspoints'"=="adjust") {
244
+ if ("`binselect'"=="es") st_local("binselect","espr")
245
+ if ("`binselect'"=="esmv") st_local("binselect","esmvpr")
246
+ if ("`binselect'"=="qs") st_local("binselect","qspr")
247
+ if ("`binselect'"=="qsmv") st_local("binselect","qsmvpr")
248
+ }
249
+ if ("`masspoints'"=="check") display("{err}Try using option {cmd:masspoints(adjust)}.")
250
+
251
+ }
252
+ }
253
+ ******************************************************************************************
254
+
255
+ }
256
+
257
+ mata{
258
+
259
+ *if ("`hide'"=="" | "`genvars'"!="" ){
260
+
261
+ ************************************************************
262
+ ************ Polynomial curve (order = p) ******************
263
+ ************************************************************
264
+
265
+ if ("`covs'"=="") {
266
+
267
+ rp_l = J(n_l,(p+1),.)
268
+ rp_r = J(n_r,(p+1),.)
269
+ for (j=1; j<=(p+1); j++) {
270
+ rp_l[.,j] = (x_l:-c):^(j-1)
271
+ rp_r[.,j] = (x_r:-c):^(j-1)
272
+ }
273
+
274
+ wh_l = rdrobust_kweight(x_l,c,h_l,"`kernel'")
275
+ wh_r = rdrobust_kweight(x_r,c,h_r,"`kernel'")
276
+
277
+ if ("`weights'"~="") {
278
+ fw = st_data(.,("`weights'"), 0)
279
+ fw_l = select(fw,x:<c); fw_r = select(fw,x:>=c)
280
+ wh_l = fw_l:*wh_l; wh_r = fw_r:*wh_r
281
+ }
282
+
283
+
284
+ gamma_p1_l = cholinv(cross(rp_l,wh_l,rp_l))*cross(rp_l, wh_l, y_l)
285
+ gamma_p1_r = cholinv(cross(rp_r,wh_r,rp_r))*cross(rp_r, wh_r, y_r)
286
+
287
+
288
+
289
+ } else {
290
+
291
+ Y = st_data(.,("`y'"), 0); X = st_data(.,("`x'"), 0)
292
+ X_l = select(X,X:<`c'); X_r = select(X,X:>=`c')
293
+ Y_l = select(Y,X:<`c'); Y_r = select(Y,X:>=`c')
294
+ h_l = strtoreal("`h_l'"); h_r = strtoreal("`h_r'")
295
+ w_h_l = rdrobust_kweight(X_l,`c',h_l,"`kernel'"); w_h_r = rdrobust_kweight(X_r,`c',h_r,"`kernel'")
296
+ ind_l = selectindex(w_h_l:> 0); ind_r = selectindex(w_h_r:> 0)
297
+
298
+ eY_l = Y_l[ind_l]; eY_r = Y_r[ind_r]
299
+ eX_l = X_l[ind_l]; eX_r = X_r[ind_r]
300
+ W_h_l = w_h_l[ind_l]; W_h_r = w_h_r[ind_r]
301
+
302
+ u_l = (eX_l:-`c')/h_l; u_r = (eX_r:-`c')/h_r;
303
+ R_p_l = J(length(ind_l),(`p'+1),.); R_p_r = J(length(ind_r),(`p'+1),.)
304
+ for (j=1; j<=(`p'+1); j++) {
305
+ R_p_l[.,j] = (eX_l:-`c'):^(j-1); R_p_r[.,j] = (eX_r:-`c'):^(j-1)
306
+ }
307
+
308
+ L_l = quadcross(R_p_l:*W_h_l,u_l:^(`p'+1)); L_r = quadcross(R_p_r:*W_h_r,u_r:^(`p'+1))
309
+
310
+
311
+ invG_p_l = cholinv(quadcross(R_p_l,W_h_l,R_p_l));
312
+ invG_p_r = cholinv(quadcross(R_p_r,W_h_r,R_p_r))
313
+
314
+ Z = st_data(.,tokens("`covs'"), 0); dZ = cols(Z)
315
+ Z_l = select(Z,X:<`c'); eZ_l = Z_l[ind_l,]
316
+ Z_r = select(Z,X:>=`c'); eZ_r = Z_r[ind_r,]
317
+ D_l = eY_l,eZ_l; D_r = eY_r,eZ_r
318
+ U_p_l = quadcross(R_p_l:*W_h_l,D_l); U_p_r = quadcross(R_p_r:*W_h_r,D_r)
319
+
320
+ beta_p_l = invG_p_l*quadcross(R_p_l:*W_h_l,D_l)
321
+ beta_p_r = invG_p_r*quadcross(R_p_r:*W_h_r,D_r)
322
+
323
+ ZWD_p_l = quadcross(eZ_l,W_h_l,D_l)
324
+ ZWD_p_r = quadcross(eZ_r,W_h_r,D_r)
325
+ colsZ = (2)::(2+dZ-1)
326
+
327
+ UiGU_p_l = quadcross(U_p_l[,colsZ],invG_p_l*U_p_l)
328
+ UiGU_p_r = quadcross(U_p_r[,colsZ],invG_p_r*U_p_r)
329
+ ZWZ_p_l = ZWD_p_l[,colsZ] - UiGU_p_l[,colsZ]
330
+ ZWZ_p_r = ZWD_p_r[,colsZ] - UiGU_p_r[,colsZ]
331
+ ZWY_p_l = ZWD_p_l[,1] - UiGU_p_l[,1]
332
+ ZWY_p_r = ZWD_p_r[,1] - UiGU_p_r[,1]
333
+ ZWZ_p = ZWZ_p_r + ZWZ_p_l
334
+ ZWY_p = ZWY_p_r + ZWY_p_l
335
+
336
+ if ("`covs_drop_coll'"=="0") gamma_p = cholinv(ZWZ_p)*ZWY_p
337
+ if ("`covs_drop_coll'"=="1") gamma_p = invsym(ZWZ_p)*ZWY_p
338
+ if ("`covs_drop_coll'"=="2") gamma_p = pinv(ZWZ_p)*ZWY_p
339
+
340
+
341
+ s_Y = (1 \ -gamma_p[,1])
342
+ gamma_p1_l = (s_Y'*beta_p_l')'
343
+ gamma_p1_r = (s_Y'*beta_p_r')'
344
+ }
345
+
346
+ st_matrix("gamma_p1_l", gamma_p1_l)
347
+ st_matrix("gamma_p1_r", gamma_p1_r)
348
+
349
+ *********** Prepare data for polynomial curve plot *****
350
+ nplot = 500
351
+ x_plot_l = rangen(c-h_l,c,nplot)
352
+ x_plot_r = rangen(c,c+h_r,nplot)
353
+ rplot_l = J(nplot,(p+1),.)
354
+ rplot_r = J(nplot,(p+1),.)
355
+ for (j=1; j<=(p+1); j++) {
356
+ rplot_l[.,j] = (x_plot_l:-c):^(j-1)
357
+ rplot_r[.,j] = (x_plot_r:-c):^(j-1)
358
+ }
359
+
360
+ gammaZ = 0
361
+ if ("`covs_eval'"=="mean" & "`covs'"!="") gammaZ = mean(Z)*gamma_p
362
+
363
+ *yhat_x = (R_p_l*gamma_p1_l \ R_p_r*gamma_p1_r ) :+ gammaZ
364
+ *resid_yz = y-Z*gamma_p
365
+
366
+ y_plot_l = rplot_l*gamma_p1_l :+ gammaZ
367
+ y_plot_r = rplot_r*gamma_p1_r :+ gammaZ
368
+
369
+ *}
370
+
371
+ *******************************************************
372
+ **** Optimal Bins (using polynomial order k) **********
373
+ *******************************************************
374
+ rk_l = J(n_l,(k+1),.)
375
+ rk_r = J(n_r,(k+1),.)
376
+ for (j=1; j<=(k+1); j++) {
377
+ rk_l[.,j] = x_l:^(j-1)
378
+ rk_r[.,j] = x_r:^(j-1)
379
+ }
380
+ gamma_k1_l = invsym(cross(rk_l,rk_l))*cross(rk_l,y_l)
381
+ gamma_k2_l = invsym(cross(rk_l,rk_l))*cross(rk_l,y_l:^2)
382
+ gamma_k1_r = invsym(cross(rk_r,rk_r))*cross(rk_r,y_r)
383
+ gamma_k2_r = invsym(cross(rk_r,rk_r))*cross(rk_r,y_r:^2)
384
+
385
+ *** Bias w/sample
386
+ mu0_k1_l = rk_l*gamma_k1_l
387
+ mu0_k1_r = rk_r*gamma_k1_r
388
+ mu0_k2_l = rk_l*gamma_k2_l
389
+ mu0_k2_r = rk_r*gamma_k2_r
390
+ drk_l = J(n_l,k,.)
391
+ drk_r = J(n_r,k,.)
392
+ for (j=1; j<=k; j++) {
393
+ drk_l[.,j] = j*x_l:^(j-1)
394
+ drk_r[.,j] = j*x_r:^(j-1)
395
+ }
396
+
397
+ dxi_l=(x_l[2::length(x_l)]-x_l[1::(length(x_l)-1)])
398
+ dxi_r=(x_r[2::length(x_r)]-x_r[1::(length(x_r)-1)])
399
+ dyi_l=(y_l[2::length(y_l)]-y_l[1::(length(y_l)-1)])
400
+ dyi_r=(y_r[2::length(y_r)]-y_r[1::(length(y_r)-1)])
401
+
402
+ x_bar_i_l = (x_l[2::length(x_l)]+x_l[1::(length(x_l)-1)])/2
403
+ x_bar_i_r = (x_r[2::length(x_r)]+x_r[1::(length(x_r)-1)])/2
404
+
405
+ drk_i_l = J(n_l-1,k,.); rk_i_l = J(n_l-1,(k+1),.)
406
+ drk_i_r = J(n_r-1,k,.); rk_i_r = J(n_r-1,(k+1),.)
407
+
408
+ for (j=1; j<=(k+1); j++) {
409
+ rk_i_l[.,j] = x_bar_i_l:^(j-1)
410
+ rk_i_r[.,j] = x_bar_i_r:^(j-1)
411
+ }
412
+
413
+ for (j=1; j<=k; j++) {
414
+ drk_i_l[.,j] = j*x_bar_i_l:^(j-1)
415
+ drk_i_r[.,j] = j*x_bar_i_r:^(j-1)
416
+ }
417
+ mu1_i_hat_l = drk_i_l*(gamma_k1_l[2::(k+1)])
418
+ mu1_i_hat_r = drk_i_r*(gamma_k1_r[2::(k+1)])
419
+
420
+ mu0_i_hat_l = rk_i_l*gamma_k1_l
421
+ mu0_i_hat_r = rk_i_r*gamma_k1_r
422
+ mu2_i_hat_l = rk_i_l*gamma_k2_l
423
+ mu2_i_hat_r = rk_i_r*gamma_k2_r
424
+
425
+ mu0_hat_l = rk_l*gamma_k1_l
426
+ mu0_hat_r = rk_r*gamma_k1_r
427
+ mu2_hat_l = rk_l*gamma_k2_l
428
+ mu2_hat_r = rk_r*gamma_k2_r
429
+
430
+ mu1_hat_l = drk_l*(gamma_k1_l[2::(k+1)])
431
+ mu1_hat_r = drk_r*(gamma_k1_r[2::(k+1)])
432
+
433
+ mu1_i_hat_l = drk_i_l*(gamma_k1_l[2::(k+1)])
434
+ mu1_i_hat_r = drk_i_r*(gamma_k1_r[2::(k+1)])
435
+
436
+ sigma2_hat_l_bar = mu2_i_hat_l - mu0_i_hat_l:^2
437
+ sigma2_hat_r_bar = mu2_i_hat_r - mu0_i_hat_r:^2
438
+
439
+ sigma2_hat_l = mu2_hat_l - mu0_hat_l:^2
440
+ sigma2_hat_r = mu2_hat_r - mu0_hat_r:^2
441
+
442
+ var_y_l = variance(y_l)
443
+ var_y_r = variance(y_r)
444
+
445
+ B_es_hat_dw = (((c-x_min)^2/(12*n))*sum(mu1_hat_l:^2),((x_max-c)^2/(12*n))*sum(mu1_hat_r:^2))
446
+ V_es_hat_dw = ((0.5/(c-x_min))*sum(dxi_l:*dyi_l:^2),(0.5/(x_max-c))*sum(dxi_r:*dyi_r:^2))
447
+ V_es_chk_dw = ((1/(c-x_min))*sum(dxi_l:*sigma2_hat_l_bar),(1/(x_max-c))*sum(dxi_r:*sigma2_hat_r_bar))
448
+ J_es_hat_dw = ceil((((2*B_es_hat_dw):/V_es_hat_dw)*n):^(1/3))
449
+ J_es_chk_dw = ceil((((2*B_es_hat_dw):/V_es_chk_dw)*n):^(1/3))
450
+
451
+ B_qs_hat_dw = ((n_l^2/(24*n))*sum(dxi_l:^2:*mu1_i_hat_l:^2), (n_r^2/(24*n))*sum(dxi_r:^2:*mu1_i_hat_r:^2))
452
+ V_qs_hat_dw = ((1/(2*n_l))*sum(dyi_l:^2),(1/(2*n_r))*sum(dyi_r:^2))
453
+ V_qs_chk_dw = ((1/n_l)*sum(sigma2_hat_l), (1/n_r)*sum(sigma2_hat_r))
454
+ J_qs_hat_dw = ceil((((2*B_qs_hat_dw):/V_qs_hat_dw)*n):^(1/3))
455
+ J_qs_chk_dw = ceil((((2*B_qs_hat_dw):/V_qs_chk_dw)*n):^(1/3))
456
+
457
+ J_es_hat_mv = (ceil((var_y_l/V_es_hat_dw[1])*(n/log(n)^2)), ceil((var_y_r/V_es_hat_dw[2])*(n/log(n)^2)))
458
+ J_es_chk_mv = (ceil((var_y_l/V_es_chk_dw[1])*(n/log(n)^2)), ceil((var_y_r/V_es_chk_dw[2])*(n/log(n)^2)))
459
+ J_qs_hat_mv = (ceil((var_y_l/V_qs_hat_dw[1])*(n/log(n)^2)), ceil((var_y_r/V_qs_hat_dw[2])*(n/log(n)^2)))
460
+ J_qs_chk_mv = (ceil((var_y_l/V_qs_chk_dw[1])*(n/log(n)^2)), ceil((var_y_r/V_qs_chk_dw[2])*(n/log(n)^2)))
461
+
462
+ if ("`binselect'"=="es" ) {
463
+ J_star_l_orig = J_es_hat_dw[1]
464
+ J_star_r_orig = J_es_hat_dw[2]
465
+ }
466
+
467
+ if ("`binselect'"=="esmv" | "`binselect'"=="") {
468
+ J_star_l_orig = J_es_hat_mv[1]
469
+ J_star_r_orig = J_es_hat_mv[2]
470
+ }
471
+
472
+ if ("`binselect'"=="espr" ) {
473
+ J_star_l_orig = J_es_chk_dw[1]
474
+ J_star_r_orig = J_es_chk_dw[2]
475
+ }
476
+
477
+ if ("`binselect'"=="esmvpr" ) {
478
+ J_star_l_orig = J_es_chk_mv[1]
479
+ J_star_r_orig = J_es_chk_mv[2]
480
+ }
481
+
482
+ if ("`binselect'"=="qs" ) {
483
+ J_star_l_orig = J_qs_hat_dw[1]
484
+ J_star_r_orig = J_qs_hat_dw[2]
485
+ }
486
+
487
+ if ("`binselect'"=="qsmv" ) {
488
+ J_star_l_orig = J_qs_hat_mv[1]
489
+ J_star_r_orig = J_qs_hat_mv[2]
490
+ }
491
+
492
+ if ("`binselect'"=="qspr" ) {
493
+ J_star_l_orig = J_qs_chk_dw[1]
494
+ J_star_r_orig = J_qs_chk_dw[2]
495
+ }
496
+
497
+ if ("`binselect'"=="qsmvpr" ) {
498
+ J_star_l_orig = J_qs_chk_mv[1]
499
+ J_star_r_orig = J_qs_chk_mv[2]
500
+ }
501
+
502
+ if (nbins_l!=0 & nbins_r!=0) {
503
+ J_star_l_orig = nbins_l
504
+ J_star_r_orig = nbins_r
505
+ }
506
+
507
+ if (`var_l'==0) {
508
+ J_star_l = 1
509
+ J_star_l_orig = 1
510
+ display("{err}Warning: not enough variability in the outcome variable below the threshold")
511
+ }
512
+ if (`var_r'==0) {
513
+ J_star_r = 1
514
+ J_star_r_orig = 1
515
+ display("{err}Warning: not enough variability in the outcome variable above the threshold")
516
+ }
517
+
518
+ J_star_l = round(`scale_l'*J_star_l_orig)
519
+ J_star_r = round(`scale_r'*J_star_r_orig)
520
+
521
+ st_numscalar("nbins_l", nbins_l)
522
+ st_numscalar("nbins_r", nbins_r)
523
+ st_numscalar("J_star_l", J_star_l)
524
+ st_numscalar("J_star_r", J_star_r)
525
+ st_numscalar("J_star_l_orig", J_star_l_orig)
526
+ st_numscalar("J_star_r_orig", J_star_r_orig)
527
+
528
+ st_matrix("J_es_hat_dw", J_es_hat_dw)
529
+ st_matrix("J_qs_hat_dw", J_qs_hat_dw)
530
+ st_matrix("J_es_chk_dw", J_es_chk_dw)
531
+ st_matrix("J_qs_chk_dw", J_qs_chk_dw)
532
+ st_matrix("J_es_hat_mv", J_es_hat_mv)
533
+ st_matrix("J_qs_hat_mv", J_qs_hat_mv)
534
+ st_matrix("J_es_chk_mv", J_es_chk_mv)
535
+ st_matrix("J_qs_chk_mv", J_qs_chk_mv)
536
+ }
537
+
538
+
539
+ ********************************************************
540
+ **** Generate id and rdplot vars ***********************
541
+ ********************************************************
542
+ local J_star_l = J_star_l
543
+ local J_star_r = J_star_r
544
+
545
+ qui gen rdplot_id = .
546
+ qui gen rdplot_min_bin = .
547
+ qui gen rdplot_max_bin = .
548
+ qui gen rdplot_mean_bin = .
549
+
550
+
551
+ if ("`binselect'"=="qs" | "`binselect'"=="qspr" | "`binselect'"=="qsmv" | "`binselect'"=="qsmvpr") {
552
+ pctile binsL = `x' if `x'<`c', nq(`J_star_l')
553
+ pctile binsR = `x' if `x'>=`c', nq(`J_star_r')
554
+ }
555
+
556
+ mata {
557
+ x_min = `x_min'
558
+ x_max = `x_max'
559
+
560
+ if ("`binselect'"=="es" | "`binselect'"=="espr" | "`binselect'"=="esmv" | "`binselect'"=="esmvpr" | "`binselect'"=="") {
561
+ binsL = rangen(x_min,c , `J_star_l'+1)
562
+ binsR = rangen(c ,x_max, `J_star_r'+1)
563
+ bins = binsL[1..length(binsL)-1]\binsR
564
+ }
565
+
566
+ if ("`binselect'"=="qs" | "`binselect'"=="qspr" | "`binselect'"=="qsmv" | "`binselect'"=="qsmvpr") {
567
+ bins = (x_min \ st_data(.,"binsL",0) \ c \ st_data(.,"binsR",0) \ x_max )
568
+ }
569
+
570
+ st_view(ZZ=.,., "`x' rdplot_id rdplot_min_bin rdplot_max_bin rdplot_mean_bin", "`touse'")
571
+ bin_i = 2
572
+ for(i=1; i<=rows(ZZ); i++) {
573
+ while(ZZ[i,1] >= bins[bin_i] & bin_i < length(bins)) bin_i++
574
+ /* PUT rdplot_id */
575
+ ZZ[i,2] = bin_i - `J_star_l' - 2
576
+ if (ZZ[i,2] >= 0) ZZ[i,2] = ZZ[i,2] + 1
577
+ /* PUT rdplot_min_bin rdplot_max_bin rdplot_mean_bin */
578
+ ZZ[i,3] = bins[bin_i-1]
579
+ ZZ[i,4] = bins[bin_i]
580
+ ZZ[i,5] = (bins[bin_i]+bins[bin_i-1])/2
581
+ }
582
+
583
+ }
584
+
585
+ ** STATA: Generate inputs for RDPLOT (and possibly for reporting back to user)
586
+ if ("`covs_eval'"=="" | "`covs_eval'"=="0") {
587
+ collapse (count) rdplot_N=`x' (mean) rdplot_min_bin rdplot_max_bin rdplot_mean_bin ///
588
+ (mean) rdplot_mean_x=`x' rdplot_mean_y=`y' ///
589
+ (semean) rdplot_se_y=`y', by(rdplot_id) fast
590
+ }
591
+
592
+ **************************************************************************
593
+ **** covs_eval **********************************************************
594
+ **************************************************************************
595
+ if ("`covs_eval'"=="mean") {
596
+ tempvar rdplot_id2 yhat_tmp yhatZ
597
+ qui gen `rdplot_id2' = rdplot_id + `J_star_l'
598
+ qui reg `y' `covs_list' i.`rdplot_id2'
599
+ qui predict `yhatZ'
600
+
601
+ collapse (count) rdplot_N=`x' (mean) rdplot_min_bin rdplot_max_bin rdplot_mean_bin ///
602
+ (mean) rdplot_mean_x=`x' rdplot_mean_y=`yhatZ' ///
603
+ (semean) rdplot_se_y=`y', by(rdplot_id) fast
604
+ }
605
+
606
+ qui replace rdplot_N=rdplot_N-1
607
+ qui gen quant = -invt(rdplot_N, abs((1-(`ci'/100))/2))
608
+ qui gen rdplot_ci_l = rdplot_mean_y - quant*rdplot_se_y
609
+ qui gen rdplot_ci_r = rdplot_mean_y + quant*rdplot_se_y
610
+ qui drop quant
611
+
612
+ mata{
613
+ if ("`genvars'"!="") {
614
+ ** MATA: Save rdplot inputs to return to user in original dataset
615
+ rdplot = st_data(.,.)
616
+ }
617
+ }
618
+
619
+ qui gen bin_length = rdplot_max_bin-rdplot_min_bin
620
+ qui su bin_length if rdplot_id<0, d
621
+ local bin_avg_l = r(mean)
622
+ local bin_med_l = r(p50)
623
+ qui su bin_length if rdplot_id>0, d
624
+ local bin_avg_r = r(mean)
625
+ local bin_med_r = r(p50)
626
+
627
+ if ("`binselect'"=="es"){
628
+ local binselect_type="evenly spaced number of bins using spacings estimators."
629
+ scalar J_star_l_IMSE = J_es_hat_dw[1,1]
630
+ scalar J_star_r_IMSE = J_es_hat_dw[1,2]
631
+ scalar J_star_l_MV = J_es_hat_mv[1,1]
632
+ scalar J_star_r_MV = J_es_hat_mv[1,2]
633
+ }
634
+ if ("`binselect'"=="espr"){
635
+ local binselect_type="evenly spaced number of bins using polynomial regression."
636
+ scalar J_star_l_IMSE = J_es_chk_dw[1,1]
637
+ scalar J_star_r_IMSE = J_es_chk_dw[1,2]
638
+ scalar J_star_l_MV = J_es_chk_mv[1,1]
639
+ scalar J_star_r_MV = J_es_chk_mv[1,2]
640
+ }
641
+ if ("`binselect'"=="esmv" | "`binselect'"==""){
642
+ local binselect_type="evenly spaced mimicking variance number of bins using spacings estimators."
643
+ scalar J_star_l_IMSE = J_es_hat_dw[1,1]
644
+ scalar J_star_r_IMSE = J_es_hat_dw[1,2]
645
+ scalar J_star_l_MV = J_es_hat_mv[1,1]
646
+ scalar J_star_r_MV = J_es_hat_mv[1,2]
647
+ }
648
+ if ("`binselect'"=="esmvpr"){
649
+ local binselect_type="evenly spaced mimicking variance number of bins using polynomial regression."
650
+ scalar J_star_l_IMSE = J_es_chk_dw[1,1]
651
+ scalar J_star_r_IMSE = J_es_chk_dw[1,2]
652
+ scalar J_star_l_MV = J_es_chk_mv[1,1]
653
+ scalar J_star_r_MV = J_es_chk_mv[1,2]
654
+ }
655
+ if ("`binselect'"=="qs"){
656
+ local binselect_type="quantile spaced number of bins using spacings estimators."
657
+ scalar J_star_l_IMSE = J_qs_hat_dw[1,1]
658
+ scalar J_star_r_IMSE = J_qs_hat_dw[1,2]
659
+ scalar J_star_l_MV = J_qs_hat_mv[1,1]
660
+ scalar J_star_r_MV = J_qs_hat_mv[1,2]
661
+ }
662
+ if ("`binselect'"=="qspr"){
663
+ local binselect_type="quantile spaced number of bins using polynomial regression."
664
+ scalar J_star_l_IMSE = J_qs_chk_dw[1,1]
665
+ scalar J_star_r_IMSE = J_qs_chk_dw[1,2]
666
+ scalar J_star_l_MV = J_qs_chk_mv[1,1]
667
+ scalar J_star_r_MV = J_qs_chk_mv[1,2]
668
+ }
669
+ if ("`binselect'"=="qsmv"){
670
+ local binselect_type="quantile spaced mimicking variance quantile spaced using spacings estimators."
671
+ scalar J_star_l_IMSE = J_qs_hat_dw[1,1]
672
+ scalar J_star_r_IMSE = J_qs_hat_dw[1,2]
673
+ scalar J_star_l_MV = J_qs_hat_mv[1,1]
674
+ scalar J_star_r_MV = J_qs_hat_mv[1,2]
675
+ }
676
+ if ("`binselect'"=="qsmvpr"){
677
+ local binselect_type="quantile spaced mimicking variance number of bins using polynomial regression."
678
+ scalar J_star_l_IMSE = J_qs_chk_dw[1,1]
679
+ scalar J_star_r_IMSE = J_qs_chk_dw[1,2]
680
+ scalar J_star_l_MV = J_qs_chk_mv[1,1]
681
+ scalar J_star_r_MV = J_qs_chk_mv[1,2]
682
+ }
683
+ if (nbins_l!=0 | nbins_r!=0 ) local binselect_type= "RD plot with manually set number of bins."
684
+
685
+ scalar scale_l = J_star_l / J_star_l_IMSE
686
+ scalar scale_r = J_star_r / J_star_r_IMSE
687
+
688
+ qui getmata x_plot_l x_plot_r y_plot_l y_plot_r, force
689
+
690
+ ereturn clear
691
+ ereturn scalar N_l = `n_l'
692
+ ereturn scalar N_r = `n_r'
693
+ ereturn scalar c = `c'
694
+ ereturn scalar J_star_l = J_star_l
695
+ ereturn scalar J_star_r = J_star_r
696
+ ereturn matrix coef_l = gamma_p1_l
697
+ ereturn matrix coef_r = gamma_p1_r
698
+ ereturn local binselect = "`binselect'"
699
+
700
+ if ("`kernel'"=="epanechnikov" | "`kernel'"=="epa") local kernel_type = "Epanechnikov"
701
+ else if ("`kernel'"=="uniform" | "`kernel'"=="uni") local kernel_type = "Uniform"
702
+ else local kernel_type = "Triangular"
703
+
704
+ disp ""
705
+ disp in smcl in yellow "RD Plot with " "`binselect_type'"
706
+ disp ""
707
+
708
+ disp in smcl in gr "{ralign 21: Cutoff c = `c'}" _col(22) " {c |} " _col(23) in gr "Left of " in yellow "c" _col(36) in gr "Right of " in yellow "c" _col(54) in gr "Number of obs = " in yellow %10.0f `n'
709
+ disp in smcl in gr "{hline 22}{c +}{hline 22}" _col(54) in gr "Kernel = " in yellow "{ralign 10:`kernel_type'}"
710
+ disp in smcl in gr "{ralign 21:Number of obs}" _col(22) " {c |} " _col(23) as result %9.0f `n_l' _col(37) %9.0f `n_r'
711
+ disp in smcl in gr "{ralign 21:Eff. Number of obs}" _col(22) " {c |} " _col(23) as result %9.0f `n_h_l' _col(37) %9.0f `n_h_r'
712
+ disp in smcl in gr "{ralign 21:Order poly. fit (p)}" _col(22) " {c |} " _col(23) as result %9.0f `p' _col(37) %9.0f `p'
713
+ disp in smcl in gr "{ralign 21:BW poly. fit (h)}" _col(22) " {c |} " _col(23) as result %9.3f `h_l' _col(37) %9.3f `h_r'
714
+ disp in smcl in gr "{ralign 21:Number of bins scale}" _col(22) " {c |} " _col(23) as result %9.3f `scale_l' _col(37) %9.3f `scale_r'
715
+ disp ""
716
+ disp "Outcome: `y'. Running variable: `x'."
717
+ disp in smcl in gr "{hline 22}{c TT}{hline 22}"
718
+ disp in smcl in gr _col(22) " {c |} " _col(23) in gr "Left of " in yellow "c" _col(36) in gr "Right of " in yellow "c"
719
+ disp in smcl in gr "{hline 22}{c +}{hline 22}"
720
+ disp in smcl in gr "{ralign 21:Bins selected}" _col(22) " {c |} " _col(23) as result %9.0f e(J_star_l) _col(37) %9.0f e(J_star_r)
721
+ disp in smcl in gr "{ralign 21:Average bin length}" _col(22) " {c |} " _col(23) as result %9.3f `bin_avg_l' _col(37) %9.3f `bin_avg_r'
722
+ disp in smcl in gr "{ralign 21:Median bin length}" _col(22) " {c |} " _col(23) as result %9.3f `bin_med_l' _col(37) %9.3f `bin_med_r'
723
+ disp in smcl in gr "{hline 22}{c +}{hline 22}"
724
+ disp in smcl in gr "{ralign 21:IMSE-optimal bins}" _col(22) " {c |} " _col(23) as result %9.0f J_star_l_IMSE _col(37) %9.0f J_star_r_IMSE
725
+ disp in smcl in gr "{ralign 21:Mimicking Var. bins}" _col(22) " {c |} " _col(23) as result %9.0f J_star_l_MV _col(37) %9.0f J_star_r_MV
726
+ disp in smcl in gr "{hline 22}{c +}{hline 22}"
727
+ disp in smcl in gr "{lalign 1:Rel. to IMSE-optimal:}" _col(22) " {c |} "
728
+ disp in smcl in gr "{ralign 21:Implied scale}" _col(22) " {c |} " _col(23) as result %9.3f scale_l _col(37) %9.3f scale_r
729
+ disp in smcl in gr "{ralign 21:WIMSE var. weight}" _col(22) " {c |} " _col(23) as result %9.3f 1/(1+scale_l^3) _col(37) %9.3f 1/(1+scale_r^3)
730
+ disp in smcl in gr "{ralign 21:WIMSE bias weight}" _col(22) " {c |} " _col(23) as result %9.3f scale_l^3/(1+scale_l^3) _col(37) %9.3f scale_r^3/(1+scale_r^3)
731
+ disp in smcl in gr "{hline 22}{c BT}{hline 22}"
732
+ disp ""
733
+ if ("`covs'"!="") disp "Covariate-adjusted estimates. Additional covariates included: `ncovs'"
734
+ if (`covs_drop_coll'==1) di as error "{err}Variables dropped due to multicollinearity."
735
+
736
+
737
+ if ("`hide'"==""){
738
+ if (`"`graph_options'"'=="" ) local graph_options = `"title("Regression function fit", color(gs0)) "'
739
+
740
+ if (`ci'==0) {
741
+ twoway (scatter rdplot_mean_y rdplot_mean_bin, sort msize(small) mcolor(gs10)) ///
742
+ (line y_plot_l x_plot_l, lcolor(black) sort lwidth(medthin) lpattern(solid) ) ///
743
+ (line y_plot_r x_plot_r, lcolor(black) sort lwidth(medthin) lpattern(solid) ), ///
744
+ xline(`c', lcolor(black) lwidth(medthin)) xscale(r(`x_min' `x_max')) legend(cols(2) order(1 "Sample average within bin" 2 "Polynomial fit of order `p'" )) `graph_options'
745
+ }
746
+ else {
747
+ if ("`shade'"==""){
748
+ twoway (rcap rdplot_ci_l rdplot_ci_r rdplot_mean_bin, color(gs11)) ///
749
+ (scatter rdplot_mean_y rdplot_mean_bin, sort msize(small) mcolor(gs10)) ///
750
+ (line y_plot_l x_plot_l, lcolor(black) sort lwidth(medthin) lpattern(solid)) ///
751
+ (line y_plot_r x_plot_r, lcolor(black) sort lwidth(medthin) lpattern(solid)), ///
752
+ xline(`c', lcolor(black) lwidth(medthin)) xscale(r(`x_min' `x_max')) legend(cols(2) order(2 "Sample average within bin" 3 "Polynomial fit of order `p'" )) `graph_options'
753
+ }
754
+ else {
755
+ twoway (rarea rdplot_ci_l rdplot_ci_r rdplot_mean_bin if rdplot_id<0, sort color(gs11)) ///
756
+ (rarea rdplot_ci_l rdplot_ci_r rdplot_mean_bin if rdplot_id>0, sort color(gs11)) ///
757
+ (scatter rdplot_mean_y rdplot_mean_bin, sort msize(small) mcolor(gs10)) ///
758
+ (line y_plot_l x_plot_l, lcolor(black) sort lwidth(medthin) lpattern(solid)) ///
759
+ (line y_plot_r x_plot_r, lcolor(black) sort lwidth(medthin) lpattern(solid)) , ///
760
+ xline(`c', lcolor(black) lwidth(medthin)) xscale(r(`x_min' `x_max')) legend(cols(2) order(2 "Sample average within bin" 3 "Polynomial fit of order `p'" )) `graph_options'
761
+ }
762
+ }
763
+ }
764
+
765
+ restore
766
+
767
+ ****************************
768
+ ** PART 2: genvars=TRUE
769
+ ****************************
770
+ if ("`genvars'"!="") {
771
+ qui for any id N min_bin max_bin mean_bin mean_x mean_y se_y ci_l ci_r hat_y: qui gen rdplot_X = .
772
+ }
773
+
774
+ mata{
775
+ if ("`genvars'"~="") {
776
+ st_view(ZZ=.,., "`x' rdplot_id rdplot_N rdplot_min_bin rdplot_max_bin rdplot_mean_bin rdplot_mean_x rdplot_mean_y rdplot_se_y rdplot_ci_l rdplot_ci_r rdplot_hat_y", "`touse'")
777
+ for (i=1; i<=rows(ZZ); i++) {
778
+ if (ZZ[i,1]!=.) {
779
+ bin_i = 2; while(ZZ[i,1] >= bins[bin_i] & bin_i < length(bins)) bin_i++
780
+ rdplot_i = bin_i - `J_star_l' - 2
781
+ if (rdplot_i >= 0) rdplot_i = rdplot_i + 1
782
+ ZZ[i,2..11] = select(rdplot, rdplot[.,1]:==rdplot_i)
783
+ ZZ[i,12] = 0; for (j=0; j<=p; j++) {
784
+ if (ZZ[i,2] <0) ZZ[i,12] = ZZ[i,12] + ((ZZ[i,1]-c)^j)*gamma_p1_l[j+1]
785
+ else ZZ[i,12] = ZZ[i,12] + ((ZZ[i,1]-c)^j)*gamma_p1_r[j+1]
786
+ }
787
+ }
788
+ }
789
+ }
790
+ }
791
+
792
+ mata mata clear
793
+ end
794
+
795
+
796
+
30/replication_package/Adofiles/rd_2021/rdplot.sthlp ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *!version 8.1.0 2021-02-22}{...}
3
+ {viewerjumpto "Syntax" "rdplot##syntax"}{...}
4
+ {viewerjumpto "Description" "rdplot##description"}{...}
5
+ {viewerjumpto "Options" "rdplot##options"}{...}
6
+ {viewerjumpto "Examples" "rdplot##examples"}{...}
7
+ {viewerjumpto "Stored results" "rdplot##stored_results"}{...}
8
+ {viewerjumpto "References" "rdplot##references"}{...}
9
+ {viewerjumpto "Authors" "rdplot##authors"}{...}
10
+
11
+ {title:Title}
12
+
13
+ {p 4 8}{cmd:rdplot} {hline 2} Data-Driven Regression Discontinuity Plots.{p_end}
14
+
15
+ {marker syntax}{...}
16
+ {title:Syntax}
17
+
18
+ {p 4 8}{cmd:rdplot } {it:depvar} {it:indepvar} {ifin}
19
+ [{cmd:,}
20
+ {cmd:c(}{it:#}{cmd:)}
21
+ {cmd:nbins(}{it:# #}{cmd:)}
22
+ {cmd:binselect(}{it:binmethod}{cmd:)}
23
+ {cmd:scale(}{it:# #}{cmd:)}
24
+ {cmd:support(}{it:# #}{cmd:)}
25
+ {cmd:p(}{it:#}{cmd:)}
26
+ {cmd:h(}{it:# #}{cmd:)}
27
+ {cmd:kernel(}{it:kernelfn}{cmd:)}
28
+ {cmd:weights(}{it:weightsvar}{cmd:)}
29
+ {cmd:covs(}{it:covars}{cmd:)}
30
+ {cmd:covs_eval(}{it:covars_eval}{cmd:)}
31
+ {cmd:covs_drop(}{it:covsdropoption}{cmd:)}
32
+ {cmd:masspoints(}{it:masspointsoption}{cmd:)}
33
+ {cmd:ci(}{it:cilevel}{cmd:)}
34
+ {it:shade}
35
+ {cmd:graph_options(}{it:gphopts}{cmd:)}
36
+ {it:hide}
37
+ {it:genvars}
38
+ ]{p_end}
39
+
40
+ {synoptset 28 tabbed}{...}
41
+
42
+ {marker description}{...}
43
+ {title:Description}
44
+
45
+ {p 4 8}{cmd:rdplot} implements several data-driven Regression Discontinuity (RD) plots, using either evenly-spaced or quantile-spaced partitioning. Two types of RD plots are constructed: (i) RD plots with binned sample means tracing out the underlying regression function, and (ii) RD plots with binned sample means
46
+ mimicking the underlying variability of the data. For technical and methodological details see
47
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Calonico, Cattaneo and Titiunik (2015a)}.{p_end}
48
+
49
+ {p 8 8} Companion commands are: {help rdrobust:rdrobust} for point estimation and inference procedures, and {help rdbwselect:rdbwselect} for data-driven bandwidth selection.{p_end}
50
+
51
+ {p 8 8}A detailed introduction to this command is given in
52
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Calonico, Cattaneo and Titiunik (2014)},
53
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":Calonico, Cattaneo, Farrell and Titiunik (2017)}. A companion {browse "www.r-project.org":R} package is also described in
54
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":Calonico, Cattaneo and Titiunik (2015b)}.{p_end}
55
+
56
+ {p 4 8}Related Stata and R packages useful for inference in RD designs are described in the following website:{p_end}
57
+
58
+ {p 8 8}{browse "https://rdpackages.github.io":https://rdpackages.github.io}{p_end}
59
+
60
+
61
+ {marker options}{...}
62
+ {title:Options}
63
+
64
+ {dlgtab:Estimand}
65
+
66
+ {p 4 8}{cmd:c(}{it:#}{cmd:)} specifies the RD cutoff in {it:indepvar}.
67
+ Default is {cmd:c(0)}.
68
+
69
+ {dlgtab:Bin Selection}
70
+
71
+ {p 4 8}{cmd:nbins(}{it:# #}{cmd:)} specifies the number of bins used to the left of the cutoff, denoted {it:J-}, and to the right of the cutoff, denoted {it:J+}, respectively.
72
+ If not specified, {it:J+} and {it:J-} are estimated using the method and options chosen below.
73
+
74
+ {p 4 8}{cmd:binselect(}{it:binmethod}{cmd:)} specifies the data-driven procedure to select the number of bins. This option is available only if {it:J-} and {it:J+} are not set manually using {cmd:nbins(.)}.
75
+ Options are:{p_end}
76
+ {p 8 12}{opt es} IMSE-optimal evenly-spaced method using spacings estimators.{p_end}
77
+ {p 8 12}{opt espr} IMSE-optimal evenly-spaced method using polynomial regression.{p_end}
78
+ {p 8 12}{opt esmv} mimicking variance evenly-spaced method using spacings estimators.{p_end}
79
+ {p 8 12}{opt esmvpr} mimicking variance evenly-spaced method using polynomial regression.{p_end}
80
+ {p 8 12}{opt qs} IMSE-optimal quantile-spaced method using spacings estimators.{p_end}
81
+ {p 8 12}{opt qspr} IMSE-optimal quantile-spaced method using polynomial regression.{p_end}
82
+ {p 8 12}{opt qsmv} mimicking variance quantile-spaced method using spacings estimators.{p_end}
83
+ {p 8 12}{opt qsmvpr} mimicking variance quantile-spaced method using polynomial regression.{p_end}
84
+ {p 8 12}Default is {cmd:binselect(esmv)}.{p_end}
85
+ {p 8 12}Note: procedures involving spacing estimators are not invariant to rearrangements of {it:depvar} when there are repeated values (i.e., mass points in the running variable).{p_end}
86
+
87
+ {p 4 8}{cmd:scale(}{it:# #}{cmd:)} specifies multiplicative factors, denoted {it:s-} and {it:s+}, respectively, to adjust the number of bins selected. Specifically, the number of bins used for the treatment and control groups will be
88
+ ceil({cmd:s- * J-}) and ceil({cmd:s+ * J+}), where J- and J+ denote the optimal numbers of bins originally computed for each group.
89
+ Default is {cmd:scale(1 1)}.
90
+
91
+ {p 4 8}{cmd:support(}{it:# #}{cmd:)} sets an optional extended support of the running variable to be used in the construction of the bins. Default is the sample range.
92
+
93
+ {p 4 8}{cmd:masspoints(}{it:masspointsoption}{cmd:)} checks and controls for repeated observations in the running variable.
94
+ Options are:{p_end}
95
+ {p 8 12}{opt off} ignores the presence of mass points. {p_end}
96
+ {p 8 12}{opt check} looks for and reports the number of unique observations at each side of the cutoff. {p_end}
97
+ {p 8 12}{opt adjust} sets {cmd:binselect(}{it:binmethod}{cmd:)} as polynomial regression when mass points are present. {p_end}
98
+ {p 8 12} Default option is {cmd:masspoints(adjust)}.{p_end}
99
+
100
+ {dlgtab:Polynomial Fit}
101
+
102
+ {p 4 8}{cmd:p(}{it:#}{cmd:)} specifies the order of the (global) polynomial fit used to approximate the population conditional expectation functions for control and treated units.
103
+ Default is {cmd:p(4)}.
104
+
105
+ {p 4 8}{cmd:h(}{it:# #}{cmd:)} specifies the bandwidth used to construct the (global) polynomial fits given the kernel choice {cmd:kernel(.)}.
106
+ If not specified, the bandwidths are chosen to span the full support of the data. If two bandwidths are specified, the first bandwidth is used for the data below the cutoff and the second bandwidth is used for the data above the cutoff.
107
+
108
+ {p 4 8}{cmd:kernel(}{it:kernelfn}{cmd:)} specifies the kernel function used to construct the local-polynomial estimator(s). Options are: {opt tri:angular}, {opt epa:nechnikov}, and {opt uni:form}.
109
+ Default is {cmd:kernel(uniform)} (i.e., equal/no weighting to all observations on the support of the kernel).
110
+
111
+ {p 4 8}{cmd:weights(}{it:weightsvar}{cmd:)} is the variable used for optional weighting of the estimation procedure. The unit-specific weights multiply the kernel function.{p_end}
112
+
113
+ {p 4 8}{cmd:covs(}{it:covars}{cmd:)} additional covariates used to construct the local-polynomial estimator(s).{p_end}
114
+
115
+ {p 4 8}{cmd:covs_eval(}{it:covars_eval}{cmd:)} sets the evaluation points for the additional covariates, when included in the estimation. Options are: {opt 0} (default) and {opt mean}.
116
+
117
+ {p 4 8}{cmd:covs_drop(}{it:covsdropoption}{cmd:)} assesses collinearity in additional covariates used for estimation and inference. Options {opt pinv} (default choice) and {opt invsym} drop collinear additional covariates, differing only in the type of inverse function used. Option {opt off} only checks for collinear additional covariates but does not drop them.{p_end}
118
+
119
+ {dlgtab:Plot Options}
120
+
121
+ {p 4 8}{cmd:ci(}{it:cilevel}{cmd:)} graphical option to display confidence intervals of level {it:cilevel} for each bin.
122
+
123
+ {p 4 8}{cmd:shade} graphical option to replace confidence intervals with shaded areas.
124
+
125
+ {p 4 8}{cmd:graph_options(}{it:gphopts}{cmd:)} graphical options to be passed on to the underlying graph command.
126
+
127
+ {p 4 8}{cmd:hide} omits the RD plot.
128
+
129
+ {dlgtab:Generate Variables}
130
+
131
+ {p 4 8}{it:genvars} generates new variables storing the following results.{p_end}
132
+ {p 8 12}{opt rdplot_id} unique bin ID for each observation. Negative natural numbers are assigned to observations to the left of the cutoff, and positive natural numbers are assigned to observations to the right of the cutoff.{p_end}
133
+ {p 8 12}{opt rdplot_N} number of observations in the corresponding bin for each observation.{p_end}
134
+ {p 8 12}{opt rdplot_min_bin} lower end value of the bin for each observation.{p_end}
135
+ {p 8 12}{opt rdplot_max_bin} upper end value of the bin for each observation.{p_end}
136
+ {p 8 12}{opt rdplot_mean_bin} middle point of the corresponding bin for each observation.{p_end}
137
+ {p 8 12}{opt rdplot_mean_x} sample mean of the running variable within the corresponding bin for each observation.{p_end}
138
+ {p 8 12}{opt rdplot_mean_y} sample mean of the outcome variable within the corresponding bin for each observation.{p_end}
139
+ {p 8 12}{opt rdplot_se_y} standard deviation of the mean of the outcome variable within the corresponding bin for each observation.{p_end}
140
+ {p 8 12}{opt rdplot_ci_l} lower end value of the confidence interval for the sample mean of the outcome variable within the corresponding bin for each observation.{p_end}
141
+ {p 8 12}{opt rdplot_ci_r} upper end value of the confidence interval for the sample mean of the outcome variable within the corresponding bin for each observation.{p_end}
142
+ {p 8 12}{opt rdplot_hat_y} predicted value of the outcome variable given by the global polynomial estimator.{p_end}
143
+
144
+
145
+ {hline}
146
+
147
+
148
+ {marker examples}{...}
149
+ {title:Example: Cattaneo, Frandsen and Titiunik (2015) Incumbency Data}
150
+
151
+ {p 4 8}Setup{p_end}
152
+ {p 8 8}{cmd:. use rdrobust_senate.dta}{p_end}
153
+
154
+ {p 4 8}Basic specification with title{p_end}
155
+ {p 8 8}{cmd:. rdplot vote margin, graph_options(title(RD Plot))}{p_end}
156
+
157
+ {p 4 8}Quadratic global polynomial with confidence bands{p_end}
158
+ {p 8 8}{cmd:. rdplot vote margin, p(2) ci(95) shade}{p_end}
159
+
160
+ {marker stored_results}{...}
161
+ {title:Stored results}
162
+
163
+ {p 4 8}{cmd:rdplot} stores the following in {cmd:e()}:{p_end}
164
+
165
+ {synoptset 20 tabbed}{...}
166
+ {p2col 5 20 24 2: Scalars}{p_end}
167
+ {synopt:{cmd:e(N_l)}}original number of observations to the left of the cutoff{p_end}
168
+ {synopt:{cmd:e(N_r)}}original number of observations to the right of the cutoff{p_end}
169
+ {synopt:{cmd:e(c)}}cutoff value{p_end}
170
+ {synopt:{cmd:e(J_star_l)}}selected number of bins to the left of the cutoff{p_end}
171
+ {synopt:{cmd:e(J_star_r)}}selected number of bins to the right of the cutoff{p_end}
172
+
173
+ {p2col 5 20 24 2: Macros}{p_end}
174
+ {synopt:{cmd:e(binselect)}}method used to compute the optimal number of bins{p_end}
175
+
176
+ {synoptset 20 tabbed}{...}
177
+ {p2col 5 20 24 2: Matrices}{p_end}
178
+ {synopt:{cmd:e(coef_l)}}coefficients of the {it:p}-th order polynomial estimated to the left of the cutoff{p_end}
179
+ {synopt:{cmd:e(coef_r)}}coefficients of the {it:p}-th order polynomial estimated to the right of the cutoff{p_end}
180
+
181
+
182
+ {marker references}{...}
183
+ {title:References}
184
+
185
+ {p 4 8}Calonico, S., M. D. Cattaneo, M. H. Farrell, and R. Titiunik. 2017.
186
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":rdrobust: Software for Regression Discontinuity Designs}.
187
+ {it:Stata Journal} 17(2): 372-404.{p_end}
188
+
189
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2014b.
190
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Robust Data-Driven Inference in the Regression-Discontinuity Design}.
191
+ {it:Stata Journal} 14(4): 909-946.{p_end}
192
+
193
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015a.
194
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Optimal Data-Driven Regression Discontinuity Plots}.
195
+ {it:Journal of the American Statistical Association} 110(512): 1753-1769.{p_end}
196
+
197
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015b.
198
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":rdrobust: An R Package for Robust Nonparametric Inference in Regression-Discontinuity Designs}.
199
+ {it:R Journal} 7(1): 38-51.{p_end}
200
+
201
+ {p 4 8}Cattaneo, M. D., B. Frandsen, and R. Titiunik. 2015.
202
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Randomization Inference in the Regression Discontinuity Design: An Application to Party Advantages in the U.S. Senate}.
203
+ {it:Journal of Causal Inference} 3(1): 1-24.{p_end}
204
+
205
+
206
+ {marker authors}{...}
207
+ {title:Authors}
208
+
209
+ {p 4 8}Sebastian Calonico, Columbia University, New York, NY.
210
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
211
+
212
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
213
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
214
+
215
+ {p 4 8}Max H. Farrell, University of Chicago, Chicago, IL.
216
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
217
+
218
+ {p 4 8}Rocio Titiunik, Princeton University, Princeton, NJ.
219
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
220
+
221
+
222
+
30/replication_package/Adofiles/rd_2021/rdrobust.ado ADDED
@@ -0,0 +1,1009 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *!version 8.1.0 2021-02-22
2
+
3
+ capture program drop rdrobust
4
+ program define rdrobust, eclass
5
+ syntax anything [if] [in] [, c(real 0) fuzzy(string) deriv(real 0) p(real 1) q(real 0) h(string) b(string) rho(real 0) covs(string) covs_drop(string) kernel(string) weights(string) bwselect(string) vce(string) level(real 95) all scalepar(real 1) scaleregul(real 1) nochecks masspoints(string) bwcheck(real 0) bwrestrict(string) stdvars(string)]
6
+ *disp in yellow "Preparing data."
7
+ marksample touse
8
+ preserve
9
+ qui keep if `touse'
10
+ tokenize "`anything'"
11
+ local y `1'
12
+ local x `2'
13
+ local kernel = lower("`kernel'")
14
+ local bwselect = lower("`bwselect'")
15
+
16
+ ******************** Set VCE ***************************
17
+ local nnmatch = 3
18
+ tokenize `vce'
19
+ local w : word count `vce'
20
+ if `w' == 1 {
21
+ local vce_select `"`1'"'
22
+ }
23
+ if `w' == 2 {
24
+ local vce_select `"`1'"'
25
+ if ("`vce_select'"=="nn") local nnmatch `"`2'"'
26
+ if ("`vce_select'"=="cluster" | "`vce_select'"=="nncluster") local clustvar `"`2'"'
27
+ }
28
+ if `w' == 3 {
29
+ local vce_select `"`1'"'
30
+ local clustvar `"`2'"'
31
+ local nnmatch `"`3'"'
32
+ if ("`vce_select'"!="cluster" & "`vce_select'"!="nncluster") di as error "{err}{cmd:vce()} incorrectly specified"
33
+ }
34
+ if `w' > 3 {
35
+ di as error "{err}{cmd:vce()} incorrectly specified"
36
+ exit 125
37
+ }
38
+
39
+ local vce_type = "NN"
40
+ if ("`vce_select'"=="hc0") local vce_type = "HC0"
41
+ if ("`vce_select'"=="hc1") local vce_type = "HC1"
42
+ if ("`vce_select'"=="hc2") local vce_type = "HC2"
43
+ if ("`vce_select'"=="hc3") local vce_type = "HC3"
44
+ if ("`vce_select'"=="cluster") local vce_type = "Cluster"
45
+ if ("`vce_select'"=="nncluster") local vce_type = "NNcluster"
46
+
47
+ if ("`vce_select'"=="cluster" | "`vce_select'"=="nncluster") local cluster = "cluster"
48
+ if ("`vce_select'"=="cluster") local vce_select = "hc0"
49
+ if ("`vce_select'"=="nncluster") local vce_select = "nn"
50
+ if ("`vce_select'"=="") local vce_select = "nn"
51
+
52
+ ******************** Set BW ***************************
53
+ tokenize `h'
54
+ local w : word count `h'
55
+ if `w' == 1 {
56
+ local h_l `"`1'"'
57
+ local h_r `"`1'"'
58
+ }
59
+ if `w' == 2 {
60
+ local h_l `"`1'"'
61
+ local h_r `"`2'"'
62
+ }
63
+ if `w' >= 3 {
64
+ di as error "{err}{cmd:h()} only accepts two inputs"
65
+ exit 125
66
+ }
67
+
68
+ tokenize `b'
69
+ local w : word count `b'
70
+ if `w' == 1 {
71
+ local b_l `"`1'"'
72
+ local b_r `"`1'"'
73
+ }
74
+ if `w' == 2 {
75
+ local b_l `"`1'"'
76
+ local b_r `"`2'"'
77
+ }
78
+ if `w' >= 3 {
79
+ di as error "{err}{cmd:b()} only accepts two inputs"
80
+ exit 125
81
+ }
82
+
83
+ *** Manual bandwidth
84
+ if ("`h'"!="") {
85
+ local bwselect = "Manual"
86
+ *if ("`b_l'"=="" & "`b_r'"=="" & "`h_l'"!="" & "`h_r'"!="") {
87
+ if ("`b'"=="") {
88
+ local b_r = `h_r'
89
+ local b_l = `h_l'
90
+ }
91
+ if ("`rho'">"0") {
92
+ local b_l = `h_l'/`rho'
93
+ local b_r = `h_r'/`rho'
94
+ }
95
+ }
96
+
97
+ *** Default bandwidth
98
+ if ("`h'"=="" & "`bwselect'"=="") local bwselect= "mserd"
99
+
100
+ ******************** Set Fuzzy***************************
101
+ tokenize `fuzzy'
102
+ local w : word count `fuzzy'
103
+ if `w' == 1 {
104
+ local fuzzyvar `"`1'"'
105
+ }
106
+ if `w' == 2 {
107
+ local fuzzyvar `"`1'"'
108
+ local sharpbw `"`2'"'
109
+ if `"`2'"' != "sharpbw" {
110
+ di as error "{err}fuzzy() only accepts sharpbw as a second input"
111
+ exit 125
112
+ }
113
+ }
114
+ if `w' >= 3 {
115
+ di as error "{err}{cmd:fuzzy()} only accepts two inputs"
116
+ exit 125
117
+ }
118
+
119
+ **** DROP MISSINGS **********************************************
120
+ qui drop if `y'==. | `x'==.
121
+ if ("`fuzzy'"~="") qui drop if `fuzzyvar'==.
122
+ if ("`cluster'"!="") qui drop if `clustvar'==.
123
+ if ("`covs'"~="") {
124
+ qui ds `covs', alpha
125
+ local covs_list = r(varlist)
126
+ local ncovs: word count `covs_list'
127
+ foreach z in `covs_list' {
128
+ qui drop if `z'==.
129
+ }
130
+ }
131
+
132
+ **** CHECK colinearity ******************************************
133
+ local covs_drop_coll = 0
134
+ if ("`covs_drop'"=="") local covs_drop = "pinv"
135
+ if ("`covs'"~="") {
136
+
137
+ if ("`covs_drop'"=="invsym") local covs_drop_coll = 1
138
+ if ("`covs_drop'"=="pinv") local covs_drop_coll = 2
139
+
140
+ qui _rmcoll `covs_list'
141
+ local nocoll_controls_cat `r(varlist)'
142
+ local nocoll_controls ""
143
+ foreach myString of local nocoll_controls_cat {
144
+ if ~strpos("`myString'", "o."){
145
+ if ~strpos("`myString'", "MYRUNVAR"){
146
+ local nocoll_controls "`nocoll_controls' `myString'"
147
+ }
148
+ }
149
+ }
150
+ local covs_new `nocoll_controls'
151
+ qui ds `covs_new', alpha
152
+ local covs_list_new = r(varlist)
153
+ local ncovs_new: word count `covs_list_new'
154
+
155
+ if (`ncovs_new'<`ncovs') {
156
+ if ("`covs_drop'"=="off") {
157
+ di as error "{err}Multicollinearity issue detected in {cmd:covs}. Please rescale and/or remove redundant covariates, or add {cmd:covs_drop} option."
158
+ exit 125
159
+ }
160
+ else {
161
+ local ncovs = "`ncovs_new'"
162
+ local covs_list = "`covs_list_new'"
163
+ *local covs_drop_coll = 1
164
+ }
165
+ }
166
+ }
167
+
168
+
169
+ **** DEFAULTS ***************************************
170
+ if ("`masspoints'"=="") local masspoints = "adjust"
171
+ if ("`stdvars'"=="") local stdvars = "off"
172
+ if ("`bwrestrict'"=="") local bwrestrict = "on"
173
+ *****************************************************************
174
+
175
+ qui su `x', d
176
+ local N = r(N)
177
+ local x_min = r(min)
178
+ local x_max = r(max)
179
+ local x_iq = r(p75)-r(p25)
180
+ local x_sd = r(sd)
181
+
182
+ if ("`deriv'">"0" & "`p'"=="1" & "`q'"=="0") local p = `deriv'+1
183
+ if ("`q'"=="0") local q = `p'+1
184
+
185
+ **************************** BEGIN ERROR CHECKING ************************************************
186
+ if ("`nochecks'"=="") {
187
+ if (`c'<=`x_min' | `c'>=`x_max'){
188
+ di as error "{err}{cmd:c()} should be set within the range of `x'"
189
+ exit 125
190
+ }
191
+
192
+
193
+ if (`N'<20){
194
+ di as error "{err}Not enough observations to perform bandwidth calculations"
195
+ di as error "{err}Estimates computed using entire sample"
196
+ local bwselect= "Manual"
197
+
198
+ qui su `x' if `x'<`c'
199
+ local range_l = abs(r(max)-r(min))
200
+ qui su `x' if `x'>=`c'
201
+ local range_r = abs(r(max)-r(min))
202
+ local bw_range = max(`range_l',`range_r')
203
+
204
+ local h = `bw_range'
205
+ local b = `bw_range'
206
+ local h_l = `bw_range'
207
+ local h_r = `bw_range'
208
+ local b_l = `bw_range'
209
+ local b_r = `bw_range'
210
+ }
211
+
212
+ if ("`kernel'"~="uni" & "`kernel'"~="uniform" & "`kernel'"~="tri" & "`kernel'"~="triangular" & "`kernel'"~="epa" & "`kernel'"~="epanechnikov" & "`kernel'"~="" ){
213
+ di as error "{err}{cmd:kernel()} incorrectly specified"
214
+ exit 7
215
+ }
216
+
217
+ if ("`bwselect'"=="CCT" | "`bwselect'"=="IK" | "`bwselect'"=="CV" |"`bwselect'"=="cct" | "`bwselect'"=="ik" | "`bwselect'"=="cv"){
218
+ di as error "{err}{cmd:bwselect()} options IK, CCT and CV have been depricated. Please see help for new options"
219
+ exit 7
220
+ }
221
+
222
+ if ("`bwselect'"!="mserd" & "`bwselect'"!="msetwo" & "`bwselect'"!="msesum" & "`bwselect'"!="msecomb1" & "`bwselect'"!="msecomb2" & "`bwselect'"!="cerrd" & "`bwselect'"!="certwo" & "`bwselect'"!="cersum" & "`bwselect'"!="cercomb1" & "`bwselect'"!="cercomb2" & "`bwselect'"~="Manual"){
223
+ di as error "{err}{cmd:bwselect()} incorrectly specified"
224
+ exit 7
225
+ }
226
+
227
+ if ("`vce_select'"~="nn" & "`vce_select'"~="" & "`vce_select'"~="cluster" & "`vce_select'"~="nncluster" & "`vce_select'"~="hc1" & "`vce_select'"~="hc2" & "`vce_select'"~="hc3" & "`vce_select'"~="hc0"){
228
+ di as error "{err}{cmd:vce()} incorrectly specified"
229
+ exit 7
230
+ }
231
+
232
+ if ("`p'"<"0" | "`q'"<="0" | "`deriv'"<"0" | "`nnmatch'"<="0" ){
233
+ di as error "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()}, {cmd:nnmatch()} should be positive"
234
+ exit 411
235
+ }
236
+
237
+ if ("`p'">="`q'" & "`q'">"0"){
238
+ di as error "{err}{cmd:q()} should be higher than {cmd:p()}"
239
+ exit 125
240
+ }
241
+
242
+ if ("`deriv'">"`p'" & "`deriv'">"0" ){
243
+ di as error "{err}{cmd:deriv()} can not be higher than {cmd:p()}"
244
+ exit 125
245
+ }
246
+
247
+ if ("`p'">"0" ) {
248
+ local p_round = round(`p')/`p'
249
+ local q_round = round(`q')/`q'
250
+ local d_round = round(`deriv'+1)/(`deriv'+1)
251
+ local m_round = round(`nnmatch')/`nnmatch'
252
+
253
+ if (`p_round'!=1 | `q_round'!=1 |`d_round'!=1 |`m_round'!=1 ){
254
+ di as error "{err}{cmd:p()}, {cmd:q()}, {cmd:deriv()} and {cmd:nnmatch()} should be integers"
255
+ exit 126
256
+ }
257
+ }
258
+ if (`level'>100 | `level'<=0){
259
+ di as error "{err}{cmd:level()}should be set between 0 and 100"
260
+ exit 125
261
+ }
262
+ }
263
+ *********************** END ERROR CHECKING ************************************************************
264
+
265
+ if ("`vce_select'"=="nn" | "`masspoints'"=="check" | "`masspoints'"=="adjust") {
266
+ sort `x', stable
267
+ if ("`vce_select'"=="nn") {
268
+ tempvar dups dupsid
269
+ by `x': gen dups = _N
270
+ by `x': gen dupsid = _n
271
+ }
272
+ }
273
+
274
+ if ("`kernel'"=="epanechnikov" | "`kernel'"=="epa") {
275
+ local kernel_type = "Epanechnikov"
276
+ local C_c = 2.34
277
+ }
278
+ else if ("`kernel'"=="uniform" | "`kernel'"=="uni") {
279
+ local kernel_type = "Uniform"
280
+ local C_c = 1.843
281
+ }
282
+ else {
283
+ local kernel_type = "Triangular"
284
+ local C_c = 2.576
285
+ }
286
+
287
+ *** Start MATA ********************************************************
288
+
289
+ mata{
290
+
291
+ *** Preparing data
292
+ Y = st_data(.,("`y'"), 0); X = st_data(.,("`x'"), 0)
293
+ ind_l = selectindex(X:<`c'); ind_r = selectindex(X:>=`c')
294
+ X_l = X[ind_l]; X_r = X[ind_r]
295
+ Y_l = Y[ind_l]; Y_r = Y[ind_r]
296
+ dZ=dT=dC=Z_l=Z_r=T_l=T_r=C_l=C_r=fw_l=fw_r=g_l=g_r=dups_l=dups_r=dupsid_l=dupsid_r=g_l=g_r=eT_l=eT_r=eZ_l=eZ_r=indC_l=indC_r=eC_l=eC_r=0
297
+
298
+ N = length(X); N_l = length(X_l); N_r = length(X_r)
299
+
300
+ if ("`covs'"~="") {
301
+ Z = st_data(.,tokens("`covs_list'"), 0); dZ = cols(Z)
302
+ Z_l = Z[ind_l,]; Z_r = Z[ind_r,]
303
+ }
304
+
305
+ if ("`fuzzy'"~="") {
306
+ T = st_data(.,("`fuzzyvar'"), 0); T_l = T[ind_l]; T_r = T[ind_r]; dT = 1
307
+ if (variance(T_l)==0 | variance(T_r)==0){
308
+ T_l = T_r = 0
309
+ st_local("perf_comp","perf_comp")
310
+ }
311
+ if ("`sharpbw'"!=""){
312
+ T_l = T_r = 0
313
+ st_local("sharpbw","sharpbw")
314
+ }
315
+ }
316
+
317
+ if ("`cluster'"!="") {
318
+ C = st_data(.,("`clustvar'"), 0)
319
+ C_l = C[ind_l]; C_r = C[ind_r]
320
+ indC_l = order(C_l,1); indC_r = order(C_r,1)
321
+ g_l = rows(panelsetup(C_l[indC_l],1)); g_r = rows(panelsetup(C_r[indC_r],1))
322
+ st_numscalar("g_l", g_l); st_numscalar("g_r", g_r)
323
+ }
324
+
325
+ if ("`weights'"~="") {
326
+ fw = st_data(.,("`weights'"), 0)
327
+ fw_l = fw[ind_l]; fw_r = fw[ind_r]
328
+ }
329
+
330
+ if ("`vce_select'"=="nn") {
331
+ dups = st_data(.,("dups"), 0); dupsid = st_data(.,("dupsid"), 0)
332
+ dups_l = dups[ind_l]; dups_r = dups[ind_r]
333
+ dupsid_l = dupsid[ind_l]; dupsid_r = dupsid[ind_r]
334
+ }
335
+
336
+
337
+ h_l = `h_l'
338
+ h_r = `h_r'
339
+ b_l = `b_l'
340
+ b_r = `b_r'
341
+
342
+ ***********************************************************************
343
+ ******** Computing bandwidth selector *********************************
344
+ ***********************************************************************
345
+ masspoints_found = 0
346
+
347
+ if ("`h'"=="") {
348
+
349
+ BWp = min((`x_sd',`x_iq'/1.349))
350
+ x_sd = y_sd = 1
351
+ c = `c'
352
+ *** Starndardized ******************
353
+ if ("`stdvars'"=="on") {
354
+ y_sd = sqrt(variance(Y))
355
+ x_sd = sqrt(variance(X))
356
+ X_l = X_l/x_sd; X_r = X_r/x_sd
357
+ Y_l = Y_l/y_sd; Y_r = Y_r/y_sd
358
+ c = `c'/x_sd
359
+ BWp = min((1, (`x_iq'/x_sd)/1.349))
360
+ }
361
+ x_l_min = min(X_l); x_l_max = max(X_l)
362
+ x_r_min = min(X_r); x_r_max = max(X_r)
363
+
364
+ range_l = c - x_l_min
365
+ range_r = x_r_max - c
366
+ ************************************
367
+
368
+ mN = `N'
369
+ bwcheck = `bwcheck'
370
+ covs_drop_coll = `covs_drop_coll'
371
+
372
+ if ("`masspoints'"=="check" | "`masspoints'"=="adjust") {
373
+ X_uniq_l = sort(uniqrows(X_l),-1)
374
+ X_uniq_r = uniqrows(X_r)
375
+ M_l = length(X_uniq_l)
376
+ M_r = length(X_uniq_r)
377
+ M = M_l + M_r
378
+ st_numscalar("M_l", M_l); st_numscalar("M_r", M_r)
379
+ mass_l = 1-M_l/N_l
380
+ mass_r = 1-M_r/N_r
381
+ if (mass_l>=0.1 | mass_r>=0.1){
382
+ masspoints_found = 1
383
+ display("{err}Mass points detected in the running variable.")
384
+ if ("`masspoints'"=="adjust" & "`bwcheck'"=="0") bwcheck = 10
385
+ if ("`masspoints'"=="check") display("{err}Try using option {cmd:masspoints(adjust)}.")
386
+ }
387
+ }
388
+
389
+
390
+ c_bw = `C_c'*BWp*mN^(-1/5)
391
+ if ("`masspoints'"=="adjust") c_bw = `C_c'*BWp*M^(-1/5)
392
+ if ("`bwrestrict'"=="on") {
393
+ bw_max = max((range_l,range_r))
394
+ c_bw = min((c_bw, bw_max))
395
+ }
396
+ if (bwcheck > 0) {
397
+ bwcheck_l = min((bwcheck, M_l))
398
+ bwcheck_r = min((bwcheck, M_r))
399
+ bw_min_l = abs(X_uniq_l:-c)[bwcheck_l] + 1e-8
400
+ bw_min_r = abs(X_uniq_r:-c)[bwcheck_r] + 1e-8
401
+ c_bw = max((c_bw, bw_min_l, bw_min_r))
402
+ }
403
+
404
+
405
+ *** Step 1: d_bw
406
+ C_d_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`q'+1, nu=`q'+1, o_B=`q'+2, h_V=c_bw, h_B=range_l+1e-8, 0, "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
407
+ C_d_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`q'+1, nu=`q'+1, o_B=`q'+2, h_V=c_bw, h_B=range_r+1e-8, 0, "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
408
+ if (C_d_l[1]==0 | C_d_l[2]==0 | C_d_r[1]==0 | C_d_r[2]==0 |C_d_l[1]==. | C_d_l[2]==. | C_d_l[3]==. |C_d_r[1]==. | C_d_r[2]==. | C_d_r[3]==.) printf("{err}Not enough variability to compute the preliminary bandwidth. Try checking for mass points with option {cmd:masspoints(check)}.\n")
409
+
410
+ *printf("i=%g\n ",C_d_l[5])
411
+ *printf("i=%g\n ",C_d_r[5])
412
+
413
+
414
+ *** BW-TWO
415
+ if ("`bwselect'"=="msetwo" | "`bwselect'"=="certwo" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2" ) {
416
+ * Preliminar bw
417
+ d_bw_l = ( (C_d_l[1] / C_d_l[2]^2) * (`N'/mN) )^C_d_l[4]
418
+ d_bw_r = ( (C_d_r[1] / C_d_r[2]^2) * (`N'/mN) )^C_d_l[4]
419
+ if ("`bwrestrict'"=="on") {
420
+ d_bw_l = min((d_bw_l, range_l))
421
+ d_bw_r = min((d_bw_r, range_r))
422
+ }
423
+ if (bwcheck > 0) {
424
+ d_bw_l = max((d_bw_l, bw_min_l))
425
+ d_bw_r = max((d_bw_r, bw_min_r))
426
+ }
427
+ * Bias bw
428
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_l, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
429
+ b_bw_l = ( (C_b_l[1] / (C_b_l[2]^2 + `scaleregul'*C_b_l[3])) * (`N'/mN) )^C_b_l[4]
430
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_r, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
431
+ b_bw_r = ( (C_b_r[1] / (C_b_r[2]^2 + `scaleregul'*C_b_r[3])) * (`N'/mN) )^C_b_l[4]
432
+ if ("`bwrestrict'"=="on") {
433
+ b_bw_l = min((b_bw_l, range_l))
434
+ b_bw_r = min((b_bw_r, range_r))
435
+ }
436
+ * Main bw
437
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_l, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
438
+ h_bw_l = ( (C_h_l[1] / (C_h_l[2]^2 + `scaleregul'*C_h_l[3])) * (`N'/mN) )^C_h_l[4]
439
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_r, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
440
+ h_bw_r = ( (C_h_r[1] / (C_h_r[2]^2 + `scaleregul'*C_h_r[3])) * (`N'/mN) )^C_h_l[4]
441
+ if ("`bwrestrict'"=="on") {
442
+ h_bw_l = min((h_bw_l, range_l))
443
+ h_bw_r = min((h_bw_r, range_r))
444
+ }
445
+ }
446
+
447
+ *** BW-SUM
448
+ if ("`bwselect'"=="msesum" | "`bwselect'"=="cersum" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2") {
449
+ * Preliminar bw
450
+ d_bw_s = ( ((C_d_l[1] + C_d_r[1]) / (C_d_r[2] + C_d_l[2])^2) * (`N'/mN) )^C_d_l[4]
451
+ if ("`bwrestrict'"=="on") d_bw_s = min((d_bw_s, bw_max))
452
+ if (bwcheck > 0) d_bw_s = max((d_bw_s, bw_min_l, bw_min_r))
453
+ * Bias bw
454
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_s, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
455
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_s, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
456
+ b_bw_s = ( ((C_b_l[1] + C_b_r[1]) / ((C_b_r[2] + C_b_l[2])^2 + `scaleregul'*(C_b_r[3]+C_b_l[3]))) * (`N'/mN) )^C_b_l[4]
457
+ if ("`bwrestrict'"=="on") b_bw_s = min((b_bw_s, bw_max))
458
+ * Main bw
459
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_s, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
460
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_s, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
461
+ h_bw_s = ( ((C_h_l[1] + C_h_r[1]) / ((C_h_r[2] + C_h_l[2])^2 + `scaleregul'*(C_h_r[3] + C_h_l[3]))) * (`N'/mN) )^C_h_l[4]
462
+ if ("`bwrestrict'"=="on") h_bw_s = min((h_bw_s, bw_max))
463
+ }
464
+
465
+ *** RD
466
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="cerrd" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2" | "`bwselect'"=="") {
467
+ * Preliminar bw
468
+ d_bw_d = ( ((C_d_l[1] + C_d_r[1]) / (C_d_r[2] - C_d_l[2])^2) * (`N'/mN) )^C_d_l[4]
469
+ if ("`bwrestrict'"=="on") d_bw_d = min((d_bw_d, bw_max))
470
+
471
+ if (bwcheck > 0) d_bw_d = max((d_bw_d, bw_min_l, bw_min_r))
472
+ * Bias bw
473
+ C_b_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_d, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
474
+ C_b_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`q', nu=`p'+1, o_B=`q'+1, h_V=c_bw, h_B=d_bw_d, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
475
+ b_bw_d = ( ((C_b_l[1] + C_b_r[1]) / ((C_b_r[2] - C_b_l[2])^2 + `scaleregul'*(C_b_r[3] + C_b_l[3]))) * (`N'/mN) )^C_b_l[4]
476
+ if ("`bwrestrict'"=="on") b_bw_d = min((b_bw_d, bw_max))
477
+
478
+ * Main bw
479
+ C_h_l = rdrobust_bw(Y_l, X_l, T_l, Z_l, C_l, fw_l, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_d, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_l, dupsid_l, covs_drop_coll)
480
+ C_h_r = rdrobust_bw(Y_r, X_r, T_r, Z_r, C_r, fw_r, c=c, o=`p', nu=`deriv', o_B=`q', h_V=c_bw, h_B=b_bw_d, `scaleregul', "`vce_select'", `nnmatch', "`kernel'", dups_r, dupsid_r, covs_drop_coll)
481
+ h_bw_d = ( ((C_h_l[1] + C_h_r[1]) / ((C_h_r[2] - C_h_l[2])^2 + `scaleregul'*(C_h_r[3] + C_h_l[3]))) * (`N'/mN) )^C_h_l[4]
482
+ if ("`bwrestrict'"=="on") h_bw_d = min((h_bw_d, bw_max))
483
+
484
+ }
485
+
486
+
487
+
488
+ if (C_b_l[1]==0 | C_b_l[2]==0 | C_b_r[1]==0 | C_b_r[2]==0 |C_b_l[1]==. | C_b_l[2]==. | C_b_l[3]==. | C_b_r[1]==. | C_b_r[2]==. | C_b_r[3]==.) printf("{err}Not enough variability to compute the bias bandwidth (b). Try checking for mass points with option {cmd:masspoints(check)}. \n")
489
+ if (C_h_l[1]==0 | C_h_l[2]==0 | C_h_r[1]==0 | C_h_r[2]==0 |C_h_l[1]==. | C_h_l[2]==. | C_h_l[3]==. | C_h_r[1]==. | C_h_r[2]==. | C_h_r[3]==.) printf("{err}Not enough variability to compute the loc. poly. bandwidth (h). Try checking for mass points with option {cmd:masspoints(check)}.\n")
490
+
491
+ cer_h = mN^(-(`p'/((3+`p')*(3+2*`p'))))
492
+ if ("`cluster'"!="") cer_h = (g_l+g_r)^(-(`p'/((3+`p')*(3+2*`p'))))
493
+ cer_b = 1
494
+
495
+ if ("`bwselect'"=="mserd" | "`bwselect'"=="cerrd" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2") {
496
+ h_l = h_r = h_mserd = x_sd*h_bw_d
497
+ b_l = b_r = b_mserd = x_sd*b_bw_d
498
+ }
499
+ if ("`bwselect'"=="msesum" | "`bwselect'"=="cersum" | "`bwselect'"=="msecomb1" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2") {
500
+ h_l = h_r = h_msesum = x_sd*h_bw_s
501
+ b_l = b_r = b_msesum = x_sd*b_bw_s
502
+ }
503
+ if ("`bwselect'"=="msetwo" | "`bwselect'"=="certwo" | "`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2") {
504
+ h_l = h_msetwo_l = x_sd*h_bw_l
505
+ h_r = h_msetwo_r = x_sd*h_bw_r
506
+ b_l = b_msetwo_l = x_sd*b_bw_l
507
+ b_r = b_msetwo_r = x_sd*b_bw_r
508
+ }
509
+ if ("`bwselect'"=="msecomb1" | "`bwselect'"=="cercomb1") {
510
+ h_l = h_r = h_msecomb1 = min((h_mserd,h_msesum))
511
+ b_l = b_r = b_msecomb1 = min((b_mserd,b_msesum))
512
+ }
513
+ if ("`bwselect'"=="msecomb2" | "`bwselect'"=="cercomb2") {
514
+ h_l = (sort((h_mserd,h_msesum,h_msetwo_l)',1))[2]
515
+ h_r = (sort((h_mserd,h_msesum,h_msetwo_r)',1))[2]
516
+ b_l = (sort((b_mserd,b_msesum,b_msetwo_l)',1))[2]
517
+ b_r = (sort((b_mserd,b_msesum,b_msetwo_r)',1))[2]
518
+ }
519
+ if ("`bwselect'"=="cerrd" | "`bwselect'"=="cersum" | "`bwselect'"=="certwo" | "`bwselect'"=="cercomb1" | "`bwselect'"=="cercomb2"){
520
+ h_l = h_l*cer_h
521
+ h_r = h_r*cer_h
522
+ b_l = b_l*cer_b
523
+ b_r = b_r*cer_b
524
+ }
525
+
526
+ if ("`rho'">"0") {
527
+ b_l = h_l/`rho'
528
+ b_r = h_r/`rho'
529
+ }
530
+
531
+ *** De-Starndardized *********************************
532
+ c = `c'*x_sd
533
+ X_uniq_l = X_uniq_l*x_sd
534
+ X_uniq_r = X_uniq_r*x_sd
535
+ X_l = X_l*x_sd; X_r = X_r*x_sd
536
+ Y_l = Y_l*y_sd; Y_r = Y_r*y_sd
537
+ range_l = range_l*x_sd
538
+ range_r = range_r*x_sd
539
+ *****************************************************
540
+
541
+
542
+ } /* close if for bw selector */
543
+
544
+ }
545
+
546
+
547
+ mata{
548
+
549
+ *** Estimation and Inference
550
+
551
+ c = strtoreal("`c'")
552
+
553
+ w_h_l = rdrobust_kweight(X_l,`c',h_l,"`kernel'"); w_h_r = rdrobust_kweight(X_r,`c',h_r,"`kernel'")
554
+ w_b_l = rdrobust_kweight(X_l,`c',b_l,"`kernel'"); w_b_r = rdrobust_kweight(X_r,`c',b_r,"`kernel'")
555
+
556
+ if ("`weights'"~="") {
557
+ w_h_l = fw_l:*w_h_l; w_h_r = fw_r:*w_h_r
558
+ w_b_l = fw_l:*w_b_l; w_b_r = fw_r:*w_b_r
559
+ }
560
+
561
+ ind_h_l = selectindex(w_h_l:> 0); ind_h_r = selectindex(w_h_r:> 0)
562
+ ind_b_l = selectindex(w_b_l:> 0); ind_b_r = selectindex(w_b_r:> 0)
563
+ N_h_l = length(ind_h_l); N_b_l = length(ind_b_l)
564
+ N_h_r = length(ind_h_r); N_b_r = length(ind_b_r)
565
+
566
+ if (N_h_l<10 | N_h_r<10 | N_b_l<10 | N_b_r<10){
567
+ display("{err}Estimates might be unreliable due to low number of effective observations.")
568
+ *exit(1)
569
+ }
570
+
571
+ ind_l = ind_b_l; ind_r = ind_b_r
572
+ if (h_l>b_l) ind_l = ind_h_l
573
+ if (h_r>b_r) ind_r = ind_h_r
574
+ eN_l = length(ind_l); eN_r = length(ind_r)
575
+ eY_l = Y_l[ind_l]; eY_r = Y_r[ind_r]
576
+ eX_l = X_l[ind_l]; eX_r = X_r[ind_r]
577
+ W_h_l = w_h_l[ind_l]; W_h_r = w_h_r[ind_r]
578
+ W_b_l = w_b_l[ind_l]; W_b_r = w_b_r[ind_r]
579
+
580
+ edups_l = edups_r = edupsid_l= edupsid_r = 0
581
+ if ("`vce_select'"=="nn") {
582
+ edups_l = dups_l[ind_l]; edups_r = dups_r[ind_r]
583
+ edupsid_l = dupsid_l[ind_l]; edupsid_r = dupsid_r[ind_r]
584
+ }
585
+
586
+ u_l = (eX_l:-`c')/h_l; u_r = (eX_r:-`c')/h_r;
587
+ R_q_l = J(eN_l,(`q'+1),.); R_q_r = J(eN_r,(`q'+1),.)
588
+ for (j=1; j<=(`q'+1); j++) {
589
+ R_q_l[.,j] = (eX_l:-`c'):^(j-1); R_q_r[.,j] = (eX_r:-`c'):^(j-1)
590
+ }
591
+ R_p_l = R_q_l[,1::(`p'+1)]; R_p_r = R_q_r[,1::(`p'+1)]
592
+
593
+ ********************************************************************************
594
+ ************ Computing RD estimates ********************************************
595
+ ********************************************************************************
596
+ L_l = quadcross(R_p_l:*W_h_l,u_l:^(`p'+1)); L_r = quadcross(R_p_r:*W_h_r,u_r:^(`p'+1))
597
+ invG_q_l = cholinv(quadcross(R_q_l,W_b_l,R_q_l)); invG_q_r = cholinv(quadcross(R_q_r,W_b_r,R_q_r))
598
+ invG_p_l = cholinv(quadcross(R_p_l,W_h_l,R_p_l)); invG_p_r = cholinv(quadcross(R_p_r,W_h_r,R_p_r))
599
+
600
+ if (rank(invG_p_l)==. | rank(invG_p_r)==. | rank(invG_q_l)==. | rank(invG_q_r)==. ){
601
+ display("{err}Invertibility problem: check variability of running variable around cutoff. Try checking for mass points with option {cmd:masspoints(check)}.")
602
+ exit(1)
603
+ }
604
+
605
+ e_p1 = J((`q'+1),1,0); e_p1[`p'+2]=1
606
+ e_v = J((`p'+1),1,0); e_v[`deriv'+1]=1
607
+ Q_q_l = ((R_p_l:*W_h_l)' - h_l^(`p'+1)*(L_l*e_p1')*((invG_q_l*R_q_l')':*W_b_l)')'
608
+ Q_q_r = ((R_p_r:*W_h_r)' - h_r^(`p'+1)*(L_r*e_p1')*((invG_q_r*R_q_r')':*W_b_r)')'
609
+ D_l = eY_l; D_r = eY_r
610
+
611
+ if ("`fuzzy'"~="") {
612
+ T = st_data(.,("`fuzzyvar'"), 0); dT = 1
613
+ T_l = select(T,X:<`c'); eT_l = T_l[ind_l]
614
+ T_r = select(T,X:>=`c'); eT_r = T_r[ind_r]
615
+ D_l = D_l,eT_l; D_r = D_r,eT_r
616
+ }
617
+
618
+ if ("`covs'"~="") {
619
+ eZ_l = Z_l[ind_l,]; eZ_r = Z_r[ind_r,]
620
+ D_l = D_l,eZ_l; D_r = D_r,eZ_r
621
+ U_p_l = quadcross(R_p_l:*W_h_l,D_l); U_p_r = quadcross(R_p_r:*W_h_r,D_r)
622
+ }
623
+
624
+ if ("`cluster'"~="") {
625
+ eC_l = C_l[ind_l]; eC_r = C_r[ind_r]
626
+ indC_l = order(eC_l,1); indC_r = order(eC_r,1)
627
+ g_l = rows(panelsetup(eC_l[indC_l],1)); g_r = rows(panelsetup(eC_r[indC_r],1))
628
+ }
629
+
630
+ beta_p_l = invG_p_l*quadcross(R_p_l:*W_h_l,D_l); beta_q_l = invG_q_l*quadcross(R_q_l:*W_b_l,D_l); beta_bc_l = invG_p_l*quadcross(Q_q_l,D_l)
631
+ beta_p_r = invG_p_r*quadcross(R_p_r:*W_h_r,D_r); beta_q_r = invG_q_r*quadcross(R_q_r:*W_b_r,D_r); beta_bc_r = invG_p_r*quadcross(Q_q_r,D_r)
632
+ beta_p = beta_p_r - beta_p_l
633
+ beta_q = beta_q_r - beta_q_l
634
+ beta_bc = beta_bc_r - beta_bc_l
635
+
636
+ if (dZ==0) {
637
+ tau_cl = tau_Y_cl = `scalepar'*factorial(`deriv')*beta_p[(`deriv'+1),1]
638
+ tau_bc = tau_Y_bc = `scalepar'*factorial(`deriv')*beta_bc[(`deriv'+1),1]
639
+ s_Y = 1
640
+ tau_Y_cl_l = `scalepar'*factorial(`deriv')*beta_p_l[(`deriv'+1),1]
641
+ tau_Y_cl_r = `scalepar'*factorial(`deriv')*beta_p_r[(`deriv'+1),1]
642
+ tau_Y_bc_l = `scalepar'*factorial(`deriv')*beta_bc_l[(`deriv'+1),1]
643
+ tau_Y_bc_r = `scalepar'*factorial(`deriv')*beta_bc_r[(`deriv'+1),1]
644
+ bias_l = tau_Y_cl_l-tau_Y_bc_l
645
+ bias_r = tau_Y_cl_r-tau_Y_bc_r
646
+ if (dT>0) {
647
+ tau_T_cl = factorial(`deriv')*beta_p[(`deriv'+1),2]
648
+ tau_T_bc = factorial(`deriv')*beta_bc[(`deriv'+1),2]
649
+ s_Y = (1/tau_T_cl \ -(tau_Y_cl/tau_T_cl^2))
650
+ B_F = tau_Y_cl-tau_Y_bc \ tau_T_cl-tau_T_bc
651
+ tau_cl = tau_Y_cl/tau_T_cl
652
+ tau_bc = tau_cl - s_Y'*B_F
653
+ sV_T = 0 \ 1
654
+ tau_T_cl_l = factorial(`deriv')*beta_p_l[(`deriv'+1),2]
655
+ tau_T_cl_r = factorial(`deriv')*beta_p_r[(`deriv'+1),2]
656
+ tau_T_bc_l = factorial(`deriv')*beta_bc_l[(`deriv'+1),2]
657
+ tau_T_bc_r = factorial(`deriv')*beta_bc_r[(`deriv'+1),2]
658
+ B_F_l = tau_Y_cl_l-tau_Y_bc_l \ tau_T_cl_l-tau_T_bc_l
659
+ B_F_r = tau_Y_cl_r-tau_Y_bc_r \ tau_T_cl_r-tau_T_bc_r
660
+ bias_l = s_Y'*B_F_l
661
+ bias_r = s_Y'*B_F_r
662
+ }
663
+ }
664
+
665
+ if (dZ>0) {
666
+ ZWD_p_l = quadcross(eZ_l,W_h_l,D_l)
667
+ ZWD_p_r = quadcross(eZ_r,W_h_r,D_r)
668
+ colsZ = (2+dT)::(2+dT+dZ-1)
669
+ UiGU_p_l = quadcross(U_p_l[,colsZ],invG_p_l*U_p_l)
670
+ UiGU_p_r = quadcross(U_p_r[,colsZ],invG_p_r*U_p_r)
671
+ ZWZ_p_l = ZWD_p_l[,colsZ] - UiGU_p_l[,colsZ]
672
+ ZWZ_p_r = ZWD_p_r[,colsZ] - UiGU_p_r[,colsZ]
673
+ ZWY_p_l = ZWD_p_l[,1::1+dT] - UiGU_p_l[,1::1+dT]
674
+ ZWY_p_r = ZWD_p_r[,1::1+dT] - UiGU_p_r[,1::1+dT]
675
+ ZWZ_p = ZWZ_p_r + ZWZ_p_l
676
+ ZWY_p = ZWY_p_r + ZWY_p_l
677
+ if ("`covs_drop_coll'"=="0") gamma_p = cholinv(ZWZ_p)*ZWY_p
678
+ if ("`covs_drop_coll'"=="1") gamma_p = invsym(ZWZ_p)*ZWY_p
679
+ if ("`covs_drop_coll'"=="2") gamma_p = pinv(ZWZ_p)*ZWY_p
680
+
681
+ s_Y = (1 \ -gamma_p[,1])
682
+
683
+ if (dT==0) {
684
+ tau_cl = `scalepar'*s_Y'*beta_p[(`deriv'+1),]'
685
+ tau_bc = `scalepar'*s_Y'*beta_bc[(`deriv'+1),]'
686
+ tau_Y_cl_l = `scalepar'*s_Y'*beta_p_l[(`deriv'+1),]'
687
+ tau_Y_cl_r = `scalepar'*s_Y'*beta_p_r[(`deriv'+1),]'
688
+ tau_Y_bc_l = `scalepar'*s_Y'*beta_bc_l[(`deriv'+1),]'
689
+ tau_Y_bc_r = `scalepar'*s_Y'*beta_bc_r[(`deriv'+1),]'
690
+ bias_l = tau_Y_cl_l-tau_Y_bc_l
691
+ bias_r = tau_Y_cl_r-tau_Y_bc_r
692
+
693
+ }
694
+
695
+ if (dT>0) {
696
+ s_T = 1 \ -gamma_p[,2]
697
+ sV_T = (0 \ 1 \ -gamma_p[,2] )
698
+ tau_Y_cl = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_p[(`deriv'+1),1],beta_p[(`deriv'+1),colsZ]))
699
+ tau_T_cl = factorial(`deriv')*s_T'*vec((beta_p[(`deriv'+1),2],beta_p[(`deriv'+1),colsZ]))
700
+ tau_Y_bc = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_bc[(`deriv'+1),1],beta_bc[(`deriv'+1),colsZ]))
701
+ tau_T_bc = factorial(`deriv')*s_T'*vec((beta_bc[(`deriv'+1),2],beta_bc[(`deriv'+1),colsZ]))
702
+
703
+ // Side-specific estimates for the fuzzy+covariates case: the outcome (Y)
+ // equation loads column 1 of beta and the treatment (T) equation loads
+ // column 2, symmetrically on both sides of the cutoff (matches the
+ // combined tau_Y_cl/tau_T_cl construction above and the dZ==0 branch).
+ tau_Y_cl_l = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_p_l[(`deriv'+1),1], beta_p_l[(`deriv'+1),colsZ]))
704
+ tau_Y_cl_r = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_p_r[(`deriv'+1),1], beta_p_r[(`deriv'+1),colsZ]))
705
+ tau_Y_bc_l = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_bc_l[(`deriv'+1),1],beta_bc_l[(`deriv'+1),colsZ]))
706
+ tau_Y_bc_r = `scalepar'*factorial(`deriv')*s_Y'*vec((beta_bc_r[(`deriv'+1),1],beta_bc_r[(`deriv'+1),colsZ]))
707
+
708
+ tau_T_cl_l = factorial(`deriv')*s_T'*vec((beta_p_l[(`deriv'+1),2], beta_p_l[(`deriv'+1),colsZ]))
709
+ tau_T_cl_r = factorial(`deriv')*s_T'*vec((beta_p_r[(`deriv'+1),2], beta_p_r[(`deriv'+1),colsZ]))
710
+ tau_T_bc_l = factorial(`deriv')*s_T'*vec((beta_bc_l[(`deriv'+1),2],beta_bc_l[(`deriv'+1),colsZ]))
711
+ tau_T_bc_r = factorial(`deriv')*s_T'*vec((beta_bc_r[(`deriv'+1),2],beta_bc_r[(`deriv'+1),colsZ]))
712
+
713
+
714
+ B_F = tau_Y_cl-tau_Y_bc \ tau_T_cl-tau_T_bc
715
+ s_Y = 1/tau_T_cl \ -(tau_Y_cl/tau_T_cl^2)
716
+ tau_cl = tau_Y_cl/tau_T_cl
717
+ tau_bc = tau_cl - s_Y'*B_F
718
+
719
+ B_F_l = tau_Y_cl_l-tau_Y_bc_l \ tau_T_cl_l-tau_T_bc_l
720
+ B_F_r = tau_Y_cl_r-tau_Y_bc_r \ tau_T_cl_r-tau_T_bc_r
721
+
722
+ bias_l = s_Y'*B_F_l
723
+ bias_r = s_Y'*B_F_r
724
+
725
+ s_Y = (1/tau_T_cl \ -(tau_Y_cl/tau_T_cl^2) \ -(1/tau_T_cl)*gamma_p[,1] + (tau_Y_cl/tau_T_cl^2)*gamma_p[,2])
726
+ }
727
+ }
728
+
729
+ **************************************************************************
730
+ ************ Computing variance-covariance matrix ************************
731
+ **************************************************************************
732
+ hii_l=hii_r=predicts_p_l=predicts_p_r=predicts_q_l=predicts_q_r=0
733
+ if ("`vce_select'"=="hc0" | "`vce_select'"=="hc1" | "`vce_select'"=="hc2" | "`vce_select'"=="hc3") {
734
+ predicts_p_l=R_p_l*beta_p_l
735
+ predicts_p_r=R_p_r*beta_p_r
736
+ predicts_q_l=R_q_l*beta_q_l
737
+ predicts_q_r=R_q_r*beta_q_r
738
+ if ("`vce_select'"=="hc2" | "`vce_select'"=="hc3") {
739
+ hii_l=J(eN_l,1,.)
740
+ for (i=1; i<=eN_l; i++) {
741
+ hii_l[i] = R_p_l[i,]*invG_p_l*(R_p_l:*W_h_l)[i,]'
742
+ }
743
+ hii_r=J(eN_r,1,.)
744
+ for (i=1; i<=eN_r; i++) {
745
+ hii_r[i] = R_p_r[i,]*invG_p_r*(R_p_r:*W_h_r)[i,]'
746
+ }
747
+ }
748
+ }
749
+
750
+ res_h_l = rdrobust_res(eX_l, eY_l, eT_l, eZ_l, predicts_p_l, hii_l, "`vce_select'", `nnmatch', edups_l, edupsid_l, `p'+1)
751
+ res_h_r = rdrobust_res(eX_r, eY_r, eT_r, eZ_r, predicts_p_r, hii_r, "`vce_select'", `nnmatch', edups_r, edupsid_r, `p'+1)
752
+ if ("`vce_select'"=="nn") {
753
+ res_b_l = res_h_l; res_b_r = res_h_r
754
+ }
755
+ else {
756
+ res_b_l = rdrobust_res(eX_l, eY_l, eT_l, eZ_l, predicts_q_l, hii_l, "`vce_select'", `nnmatch', edups_l, edupsid_l, `q'+1)
757
+ res_b_r = rdrobust_res(eX_r, eY_r, eT_r, eZ_r, predicts_q_r, hii_r, "`vce_select'", `nnmatch', edups_r, edupsid_r, `q'+1)
758
+ }
759
+
760
+ V_Y_cl_l = invG_p_l*rdrobust_vce(dT+dZ, s_Y, R_p_l:*W_h_l, res_h_l, eC_l, indC_l)*invG_p_l
761
+ V_Y_cl_r = invG_p_r*rdrobust_vce(dT+dZ, s_Y, R_p_r:*W_h_r, res_h_r, eC_r, indC_r)*invG_p_r
762
+ V_Y_bc_l = invG_p_l*rdrobust_vce(dT+dZ, s_Y, Q_q_l, res_b_l, eC_l, indC_l)*invG_p_l
763
+ V_Y_bc_r = invG_p_r*rdrobust_vce(dT+dZ, s_Y, Q_q_r, res_b_r, eC_r, indC_r)*invG_p_r
764
+ V_tau_cl = (`scalepar')^2*factorial(`deriv')^2*(V_Y_cl_l+V_Y_cl_r)[`deriv'+1,`deriv'+1]
765
+ V_tau_rb = (`scalepar')^2*factorial(`deriv')^2*(V_Y_bc_l+V_Y_bc_r)[`deriv'+1,`deriv'+1]
766
+ se_tau_cl = sqrt(V_tau_cl); se_tau_rb = sqrt(V_tau_rb)
767
+
768
+ if ("`fuzzy'"!="") {
769
+ V_T_cl_l = invG_p_l*rdrobust_vce(dT+dZ, sV_T, R_p_l:*W_h_l, res_h_l, eC_l, indC_l)*invG_p_l
770
+ V_T_cl_r = invG_p_r*rdrobust_vce(dT+dZ, sV_T, R_p_r:*W_h_r, res_h_r, eC_r, indC_r)*invG_p_r
771
+ V_T_bc_l = invG_p_l*rdrobust_vce(dT+dZ, sV_T, Q_q_l, res_b_l, eC_l, indC_l)*invG_p_l
772
+ V_T_bc_r = invG_p_r*rdrobust_vce(dT+dZ, sV_T, Q_q_r, res_b_r, eC_r, indC_r)*invG_p_r
773
+ V_T_cl = factorial(`deriv')^2*(V_T_cl_l+V_T_cl_r)[`deriv'+1,`deriv'+1]
774
+ V_T_rb = factorial(`deriv')^2*(V_T_bc_l+V_T_bc_r)[`deriv'+1,`deriv'+1]
775
+ se_tau_T_cl = sqrt(V_T_cl); se_tau_T_rb = sqrt(V_T_rb)
776
+ }
777
+
778
+
779
+ **** Stored results
780
+ st_numscalar("N", N)
781
+ st_numscalar("N_l", N_l)
782
+ st_numscalar("N_r", N_r)
783
+ st_numscalar("x_l_min", x_l_min)
784
+ st_numscalar("x_l_max", x_l_max)
785
+ st_numscalar("x_r_min", x_r_min)
786
+ st_numscalar("x_r_max", x_r_max)
787
+
788
+ st_numscalar("h_l", h_l)
789
+ st_numscalar("h_r", h_r)
790
+ st_numscalar("b_l", b_l)
791
+ st_numscalar("b_r", b_r)
792
+
793
+ st_numscalar("quant", -invnormal(abs((1-(`level'/100))/2)))
794
+ st_numscalar("N_h_l", N_h_l); st_numscalar("N_b_l", N_b_l)
795
+ st_numscalar("N_h_r", N_h_r); st_numscalar("N_b_r", N_b_r)
796
+ st_numscalar("tau_cl", tau_cl); st_numscalar("se_tau_cl", se_tau_cl)
797
+ st_numscalar("tau_bc", tau_bc); st_numscalar("se_tau_rb", se_tau_rb)
798
+ st_numscalar("tau_Y_cl_r", tau_Y_cl_r); st_numscalar("tau_Y_cl_l", tau_Y_cl_l)
799
+ st_numscalar("tau_Y_bc_r", tau_Y_bc_r); st_numscalar("tau_Y_bc_l", tau_Y_bc_l)
800
+ st_numscalar("bias_l", bias_l); st_numscalar("bias_r", bias_r)
801
+ st_matrix("beta_p_r", beta_p_r); st_matrix("beta_p_l", beta_p_l)
802
+ st_matrix("beta_q_r", beta_q_r); st_matrix("beta_q_l", beta_q_l)
803
+ st_numscalar("g_l", g_l); st_numscalar("g_r", g_r)
804
+ st_matrix("b", (tau_cl))
805
+ st_matrix("V", (V_tau_cl))
806
+ st_matrix("V_Y_cl_r", V_Y_cl_r); st_matrix("V_Y_cl_l", V_Y_cl_l)
807
+ st_matrix("V_Y_bc_r", V_Y_bc_r); st_matrix("V_Y_bc_l", V_Y_bc_l)
808
+ st_numscalar("masspoints_found", masspoints_found)
809
+
810
+ if ("`all'"~="") {
811
+ st_matrix("b", (tau_cl,tau_bc,tau_bc))
812
+ st_matrix("V", (V_tau_cl,0,0 \ 0,V_tau_cl,0 \0,0,V_tau_rb))
813
+ }
814
+
815
+ if ("`fuzzy'"!="") {
816
+ st_numscalar("tau_T_cl", tau_T_cl); st_numscalar("se_tau_T_cl", se_tau_T_cl)
817
+ st_numscalar("tau_T_bc", tau_T_bc); st_numscalar("se_tau_T_rb", se_tau_T_rb)
818
+
819
+ st_numscalar("tau_T_cl_r", tau_T_cl_r); st_numscalar("tau_T_cl_l", tau_T_cl_l)
820
+ st_numscalar("tau_T_bc_r", tau_T_bc_r); st_numscalar("tau_T_bc_l", tau_T_bc_l)
821
+ }
822
+ }
823
+
824
+ ************************************************
825
+ ********* OUTPUT TABLE *************************
826
+ ************************************************
827
+ local rho_l = scalar(h_l)/scalar(b_l)
828
+ local rho_r = scalar(h_r)/scalar(b_r)
829
+
830
+ disp ""
831
+ if "`fuzzy'"=="" {
832
+ if ("`covs'"=="") {
833
+ if ("`deriv'"=="0") disp "Sharp RD estimates using local polynomial regression."
834
+ else if ("`deriv'"=="1") disp "Sharp kink RD estimates using local polynomial regression."
835
+ else disp "Sharp RD estimates using local polynomial regression. Derivative of order " `deriv' "."
836
+ }
837
+ else {
838
+ if ("`deriv'"=="0") disp "Covariate-adjusted sharp RD estimates using local polynomial regression."
839
+ else if ("`deriv'"=="1") disp "Covariate-adjusted sharp kink RD estimates using local polynomial regression."
840
+ else disp "Covariate-adjusted sharp RD estimates using local polynomial regression. Derivative of order " `deriv' "."
841
+ }
842
+ }
843
+ else {
844
+ if ("`covs'"=="") {
845
+ if ("`deriv'"=="0") disp "Fuzzy RD estimates using local polynomial regression."
846
+ else if ("`deriv'"=="1") disp "Fuzzy kink RD estimates using local polynomial regression."
847
+ else disp "Fuzzy RD estimates using local polynomial regression. Derivative of order " `deriv' "."
848
+ }
849
+ else {
850
+ * Fuzzy branch with covariates: header must say "fuzzy", not "sharp".
+ if ("`deriv'"=="0") disp "Covariate-adjusted fuzzy RD estimates using local polynomial regression."
851
+ else if ("`deriv'"=="1") disp "Covariate-adjusted fuzzy kink RD estimates using local polynomial regression."
852
+ else disp "Covariate-adjusted fuzzy RD estimates using local polynomial regression. Derivative of order " `deriv' "."
853
+ }
854
+ }
855
+
856
+ disp ""
857
+ disp in smcl in gr "{ralign 18: Cutoff c = `c'}" _col(19) " {c |} " _col(21) in gr "Left of " in yellow "c" _col(33) in gr "Right of " in yellow "c" _col(55) in gr "Number of obs = " in yellow %10.0f scalar(N)
858
+ disp in smcl in gr "{hline 19}{c +}{hline 22}" _col(55) in gr "BW type = " in yellow "{ralign 10:`bwselect'}"
859
+ disp in smcl in gr "{ralign 18:Number of obs}" _col(19) " {c |} " _col(21) as result %9.0f scalar(N_l) _col(34) %9.0f scalar(N_r) _col(55) in gr "Kernel = " in yellow "{ralign 10:`kernel_type'}"
860
+ disp in smcl in gr "{ralign 18:Eff. Number of obs}" _col(19) " {c |} " _col(21) as result %9.0f scalar(N_h_l) _col(34) %9.0f scalar(N_h_r) _col(55) in gr "VCE method = " in yellow "{ralign 10:`vce_type'}"
861
+ disp in smcl in gr "{ralign 18:Order est. (p)}" _col(19) " {c |} " _col(21) as result %9.0f `p' _col(34) %9.0f `p'
862
+ disp in smcl in gr "{ralign 18:Order bias (q)}" _col(19) " {c |} " _col(21) as result %9.0f `q' _col(34) %9.0f `q'
863
+ disp in smcl in gr "{ralign 18:BW est. (h)}" _col(19) " {c |} " _col(21) as result %9.3f scalar(h_l) _col(34) %9.3f scalar(h_r)
864
+ disp in smcl in gr "{ralign 18:BW bias (b)}" _col(19) " {c |} " _col(21) as result %9.3f scalar(b_l) _col(34) %9.3f scalar(b_r)
865
+ disp in smcl in gr "{ralign 18:rho (h/b)}" _col(19) " {c |} " _col(21) as result %9.3f `rho_l' _col(34) %9.3f `rho_r'
866
+ if ("`masspoints'"=="check" | masspoints_found==1) disp in smcl in gr "{ralign 18:Unique obs}" _col(19) " {c |} " _col(21) as result %9.0f scalar(M_l) _col(34) %9.0f scalar(M_r)
867
+ if ("`cluster'"!="") disp in smcl in gr "{ralign 18:Number of clusters}" _col(19) " {c |} " _col(21) as result %9.0f scalar(g_l) _col(34) %9.0f scalar(g_r)
868
+ disp ""
869
+
870
+ if ("`fuzzy'"~="") {
871
+ disp in yellow "First-stage estimates. Outcome: `fuzzyvar'. Running variable: `x'."
872
+ disp in smcl in gr "{hline 19}{c TT}{hline 60}"
873
+ disp in smcl in gr "{ralign 18:Method}" _col(19) " {c |} " _col(24) "Coef." _col(33) `"Std. Err."' _col(46) "z" _col(52) "P>|z|" _col(61) `"[`level'% Conf. Interval]"'
874
+ disp in smcl in gr "{hline 19}{c +}{hline 60}"
875
+
876
+ if ("`all'"=="") {
877
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_T_cl) _col(33) %7.0g scalar(se_tau_T_cl) _col(43) %5.4f scalar(tau_T_cl/se_tau_T_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_T_cl/se_tau_T_cl))) _col(60) %8.0g scalar(tau_T_cl) - scalar(quant*se_tau_T_cl) _col(73) %8.0g scalar(tau_T_cl + quant*se_tau_T_cl)
878
+ disp in smcl in gr "{ralign 18:Robust}" _col(19) " {c |} " _col(22) in ye %7.0g " -" _col(33) %7.0g " -" _col(43) %5.4f scalar(tau_T_bc/se_tau_T_rb) _col(52) %5.3f scalar(2*normal(-abs(tau_T_bc/se_tau_T_rb))) _col(60) %8.0g scalar(tau_T_bc - quant*se_tau_T_rb) _col(73) %8.0g scalar(tau_T_bc + quant*se_tau_T_rb)
879
+ }
880
+ else {
881
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_T_cl) _col(33) %7.0g scalar(se_tau_T_cl) _col(43) %5.4f scalar(tau_T_cl/se_tau_T_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_T_cl/se_tau_T_cl))) _col(60) %8.0g scalar(tau_T_cl - quant*se_tau_T_cl) _col(73) %8.0g scalar(tau_T_cl + quant*se_tau_T_cl)
882
+ disp in smcl in gr "{ralign 18:Bias-corrected}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_T_bc) _col(33) %7.0g scalar(se_tau_T_cl) _col(43) %5.4f scalar(tau_T_bc/se_tau_T_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_T_bc/se_tau_T_cl))) _col(60) %8.0g scalar(tau_T_bc - quant*se_tau_T_cl) _col(73) %8.0g scalar(tau_T_bc + quant*se_tau_T_cl)
883
+ disp in smcl in gr "{ralign 18:Robust}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_T_bc) _col(33) %7.0g scalar(se_tau_T_rb) _col(43) %5.4f scalar(tau_T_bc/se_tau_T_rb) _col(52) %5.3f scalar(2*normal(-abs(tau_T_bc/se_tau_T_rb))) _col(60) %8.0g scalar(tau_T_bc - quant*se_tau_T_rb) _col(73) %8.0g scalar(tau_T_bc + quant*se_tau_T_rb)
884
+ }
885
+ disp in smcl in gr "{hline 19}{c BT}{hline 60}"
886
+ disp ""
887
+ }
888
+
889
+ if ("`fuzzy'"=="") disp "Outcome: `y'. Running variable: `x'."
890
+ else disp in yellow "Treatment effect estimates. Outcome: `y'. Running variable: `x'. Treatment Status: `fuzzyvar'."
891
+
892
+ disp in smcl in gr "{hline 19}{c TT}{hline 60}"
893
+ disp in smcl in gr "{ralign 18:Method}" _col(19) " {c |} " _col(24) "Coef." _col(33) `"Std. Err."' _col(46) "z" _col(52) "P>|z|" _col(61) `"[`level'% Conf. Interval]"'
894
+ disp in smcl in gr "{hline 19}{c +}{hline 60}"
895
+
896
+ if ("`all'"=="") {
897
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_cl) _col(33) %7.0g scalar(se_tau_cl) _col(43) %5.4f scalar(tau_cl/se_tau_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_cl/se_tau_cl))) _col(60) %8.0g scalar(tau_cl - quant*se_tau_cl) _col(73) %8.0g scalar(tau_cl + quant*se_tau_cl)
898
+ disp in smcl in gr "{ralign 18:Robust}" _col(19) " {c |} " _col(22) in ye %7.0g " -" _col(33) %7.0g " -" _col(43) %5.4f scalar(tau_bc/se_tau_rb) _col(52) %5.3f scalar(2*normal(-abs(tau_bc/se_tau_rb))) _col(60) %8.0g scalar(tau_bc - quant*se_tau_rb) _col(73) %8.0g scalar(tau_bc + quant*se_tau_rb)
899
+ }
900
+ else {
901
+ disp in smcl in gr "{ralign 18:Conventional}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_cl) _col(33) %7.0g scalar(se_tau_cl) _col(43) %5.4f scalar(tau_cl/se_tau_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_cl/se_tau_cl))) _col(60) %8.0g scalar(tau_cl - quant*se_tau_cl) _col(73) %8.0g scalar(tau_cl + quant*se_tau_cl)
902
+ disp in smcl in gr "{ralign 18:Bias-corrected}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_bc) _col(33) %7.0g scalar(se_tau_cl) _col(43) %5.4f scalar(tau_bc/se_tau_cl) _col(52) %5.3f scalar(2*normal(-abs(tau_bc/se_tau_cl))) _col(60) %8.0g scalar(tau_bc - quant*se_tau_cl) _col(73) %8.0g scalar(tau_bc + quant*se_tau_cl)
903
+ disp in smcl in gr "{ralign 18:Robust}" _col(19) " {c |} " _col(22) in ye %7.0g scalar(tau_bc) _col(33) %7.0g scalar(se_tau_rb) _col(43) %5.4f scalar(tau_bc/se_tau_rb) _col(52) %5.3f scalar(2*normal(-abs(tau_bc/se_tau_rb))) _col(60) %8.0g scalar(tau_bc - quant*se_tau_rb) _col(73) %8.0g scalar(tau_bc + quant*se_tau_rb)
904
+ }
905
+ disp in smcl in gr "{hline 19}{c BT}{hline 60}"
906
+
907
+ if ("`covs'"!="") di "Covariate-adjusted estimates. Additional covariates included: `ncovs'"
908
+ * if (`covs_drop_coll'>=1) di "Variables dropped due to multicollinearity."
909
+ if ("`cluster'"!="") di "Std. Err. adjusted for clusters in " "`clustvar'"
910
+ if ("`scalepar'"!="1") di "Scale parameter: " `scalepar'
911
+ if ("`scaleregul'"!="1") di "Scale regularization: " `scaleregul'
912
+ if ("`masspoints'"=="check") di "Running variable checked for mass points."
913
+ if ("`masspoints'"=="adjust" & masspoints_found==1) di "Estimates adjusted for mass points in the running variable."
914
+
915
+ * Display data-quality warnings unless the user suppressed them with the
+ * nowarnings option (original condition was inverted: warnings only
+ * appeared when suppression was requested).
+ if ("`nowarnings'"=="") {
916
+ if (scalar(h_l)>=`range_l' | scalar(h_r)>=`range_r') disp in red "WARNING: bandwidth {it:h} greater than the range of the data."
917
+ if (scalar(b_l)>=`range_l' | scalar(b_r)>=`range_r') disp in red "WARNING: bandwidth {it:b} greater than the range of the data."
918
+ if (scalar(N_h_l)<20 | scalar(N_h_r)<20) disp in red "WARNING: bandwidth {it:h} too low."
919
+ if (scalar(N_b_l)<20 | scalar(N_b_r)<20) disp in red "WARNING: bandwidth {it:b} too low."
920
+ if ("`sharpbw'"~="") disp in red "WARNING: bandwidths automatically computed for sharp RD estimation."
921
+ if ("`perf_comp'"~="") disp in red "WARNING: bandwidths automatically computed for sharp RD estimation because perfect compliance was detected on at least one side of the threshold."
922
+ }
923
+
924
+ local ci_l_rb = round(scalar(tau_bc - quant*se_tau_rb),0.001)
925
+ local ci_r_rb = round(scalar(tau_bc + quant*se_tau_rb),0.001)
926
+
927
+ matrix rownames V = RD_Estimate
928
+ matrix colnames V = RD_Estimate
929
+ matrix colnames b = RD_Estimate
930
+
931
+ local tempo: colfullnames V
932
+ matrix rownames V = `tempo'
933
+
934
+ if ("`all'"~="") {
935
+ matrix rownames V = Conventional Bias-corrected Robust
936
+ matrix colnames V = Conventional Bias-corrected Robust
937
+ matrix colnames b = Conventional Bias-corrected Robust
938
+ }
939
+
940
+ restore
941
+
942
+ ereturn clear
943
+ cap ereturn post b V, esample(`touse')
944
+ ereturn scalar N = `N'
945
+ ereturn scalar N_l = scalar(N_l)
946
+ ereturn scalar N_r = scalar(N_r)
947
+ ereturn scalar N_h_l = scalar(N_h_l)
948
+ ereturn scalar N_h_r = scalar(N_h_r)
949
+ ereturn scalar N_b_l = scalar(N_b_l)
950
+ ereturn scalar N_b_r = scalar(N_b_r)
951
+ ereturn scalar c = `c'
952
+ ereturn scalar p = `p'
953
+ ereturn scalar q = `q'
954
+ ereturn scalar h_l = scalar(h_l)
955
+ ereturn scalar h_r = scalar(h_r)
956
+ ereturn scalar b_l = scalar(b_l)
957
+ ereturn scalar b_r = scalar(b_r)
958
+ ereturn scalar level = `level'
959
+ ereturn scalar tau_cl = scalar(tau_cl)
960
+ ereturn scalar tau_bc = scalar(tau_bc)
961
+ ereturn scalar tau_cl_l = scalar(tau_Y_cl_l)
962
+ ereturn scalar tau_cl_r = scalar(tau_Y_cl_r)
963
+ ereturn scalar tau_bc_l = scalar(tau_Y_bc_l)
964
+ ereturn scalar tau_bc_r = scalar(tau_Y_bc_r)
965
+ ereturn scalar bias_l = scalar(bias_l)
966
+ ereturn scalar bias_r = scalar(bias_r)
967
+ ereturn scalar se_tau_cl = scalar(se_tau_cl)
968
+ ereturn scalar se_tau_rb = scalar(se_tau_rb)
969
+ ereturn scalar ci_l_cl = scalar(tau_cl - quant*se_tau_cl)
970
+ ereturn scalar ci_r_cl = scalar(tau_cl + quant*se_tau_cl)
971
+ ereturn scalar pv_cl = scalar(2*normal(-abs(tau_cl/se_tau_cl)))
972
+ ereturn scalar ci_l_rb = scalar(tau_bc - quant*se_tau_rb)
973
+ ereturn scalar ci_r_rb = scalar(tau_bc + quant*se_tau_rb)
974
+ ereturn scalar pv_rb = scalar(2*normal(-abs(tau_bc/se_tau_rb)))
975
+
976
+ if ("`fuzzy'"!="") {
977
+ ereturn scalar tau_T_cl = scalar(tau_T_cl)
978
+ ereturn scalar tau_T_bc = scalar(tau_T_bc)
979
+ ereturn scalar se_tau_T_cl = scalar(se_tau_T_cl)
980
+ ereturn scalar se_tau_T_rb = scalar(se_tau_T_rb)
981
+
982
+ ereturn scalar tau_T_cl_l = scalar(tau_T_cl_l)
983
+ ereturn scalar tau_T_cl_r = scalar(tau_T_cl_r)
984
+ ereturn scalar tau_T_bc_l = scalar(tau_T_bc_l)
985
+ ereturn scalar tau_T_bc_r = scalar(tau_T_bc_r)
986
+ }
987
+
988
+ ereturn matrix beta_p_r = beta_p_r
989
+ ereturn matrix beta_p_l = beta_p_l
990
+
991
+ ereturn matrix V_cl_l = V_Y_cl_l
992
+ ereturn matrix V_cl_r = V_Y_cl_r
993
+ ereturn matrix V_rb_l = V_Y_bc_l
994
+ ereturn matrix V_rb_r = V_Y_bc_r
995
+
996
+ ereturn local ci_rb [`ci_l_rb' ; `ci_r_rb']
997
+ ereturn local kernel = "`kernel_type'"
998
+ ereturn local bwselect = "`bwselect'"
999
+ ereturn local vce_select = "`vce_type'"
1000
+ if ("`covs'"!="") ereturn local covs "`covs_list'"
1001
+ if ("`cluster'"!="") ereturn local clustvar "`clustvar'"
1002
+ ereturn local outcomevar "`y'"
1003
+ ereturn local runningvar "`x'"
1004
+ ereturn local depvar "`y'"
1005
+ ereturn local cmd "rdrobust"
1006
+
1007
+ mata mata clear
1008
+
1009
+ end
30/replication_package/Adofiles/rd_2021/rdrobust.sthlp ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *!version 8.1.0 2021-02-22}{...}
3
+ {viewerjumpto "Syntax" "rdrobust##syntax"}{...}
4
+ {viewerjumpto "Description" "rdrobust##description"}{...}
5
+ {viewerjumpto "Options" "rdrobust##options"}{...}
6
+ {viewerjumpto "Examples" "rdrobust##examples"}{...}
7
+ {viewerjumpto "Stored results" "rdrobust##stored_results"}{...}
8
+ {viewerjumpto "References" "rdrobust##references"}{...}
9
+ {viewerjumpto "Authors" "rdrobust##authors"}{...}
10
+
11
+
12
+ {title:Title}
13
+
14
+ {p 4 8}{cmd:rdrobust} {hline 2} Local Polynomial Regression Discontinuity Estimation with Robust Bias-Corrected Confidence Intervals and Inference Procedures.{p_end}
15
+
16
+ {marker syntax}{...}
17
+ {title:Syntax}
18
+
19
+ {p 4 8}{cmd:rdrobust} {it:depvar} {it:runvar} {ifin}
20
+ [{cmd:,}
21
+ {cmd:c(}{it:#}{cmd:)}
22
+ {cmd:fuzzy(}{it:fuzzyvar [sharpbw]}{cmd:)}
23
+ {cmd:deriv(}{it:#}{cmd:)}
24
+ {cmd:scalepar(}{it:#}{cmd:)}
25
+ {cmd:p(}{it:#}{cmd:)}
26
+ {cmd:q(}{it:#}{cmd:)}
27
+ {cmd:h(}{it:# #}{cmd:)}
28
+ {cmd:b(}{it:# #}{cmd:)}
29
+ {cmd:rho(}{it:#}{cmd:)}
30
+ {cmd:covs(}{it:covars}{cmd:)}
31
+ {cmd:covs_drop(}{it:covsdropoption}{cmd:)}
32
+ {cmd:kernel(}{it:kernelfn}{cmd:)}
33
+ {cmd:weights(}{it:weightsvar}{cmd:)}
34
+ {cmd:bwselect(}{it:bwmethod}{cmd:)}
35
+ {cmd:scaleregul(}{it:#}{cmd:)}
36
+ {cmd:masspoints(}{it:masspointsoption}{cmd:)}
37
+ {cmd:bwcheck(}{it:#}{cmd:)}
38
+ {cmd:bwrestrict(}{it:bwropt}{cmd:)}
39
+ {cmd:stdvars(}{it:stdopt}{cmd:)}
40
+ {cmd:vce(}{it:vcetype [vceopt1 vceopt2]}{cmd:)}
41
+ {cmd:level(}{it:#}{cmd:)}
42
+ {cmd:all}
43
+ ]{p_end}
44
+
45
+ {synoptset 28 tabbed}{...}
46
+
47
+ {marker description}{...}
48
+ {title:Description}
49
+
50
+ {p 4 8}{cmd:rdrobust} implements local polynomial Regression Discontinuity (RD) point estimators with robust bias-corrected confidence intervals and inference procedures developed in
51
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Calonico, Cattaneo and Titiunik (2014a)},
52
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":Calonico, Cattaneo and Farrell (2018)},
53
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Calonico, Cattaneo, Farrell and Titiunik (2019)},
54
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Calonico, Cattaneo and Farrell (2020)}.
55
+ It also computes alternative estimation and inference procedures available in the literature.{p_end}
56
+
57
+ {p 8 8} Companion commands are: {help rdbwselect:rdbwselect} for data-driven bandwidth selection, and {help rdplot:rdplot} for data-driven RD plots (see
58
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Calonico, Cattaneo and Titiunik (2015a)} for details).{p_end}
59
+
60
+ {p 8 8}A detailed introduction to this command is given in
61
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Calonico, Cattaneo and Titiunik (2014b)},
62
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":Calonico, Cattaneo, Farrell and Titiunik (2017)}. A companion {browse "www.r-project.org":R} package is also described in
63
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":Calonico, Cattaneo and Titiunik (2015b)}.{p_end}
64
+
65
+ {p 4 8}Related Stata and R packages useful for inference in RD designs are described in the following website:{p_end}
66
+
67
+ {p 8 8}{browse "https://rdpackages.github.io/":https://rdpackages.github.io/}{p_end}
68
+
69
+
70
+ {marker options}{...}
71
+ {title:Options}
72
+
73
+ {dlgtab:Estimand}
74
+
75
+ {p 4 8}{cmd:c(}{it:#}{cmd:)} specifies the RD cutoff for {it:runvar}.
76
+ Default is {cmd:c(0)}.{p_end}
77
+
78
+ {p 4 8}{cmd:fuzzy(}{it:fuzzyvar [sharpbw]}{cmd:)} specifies the treatment status variable used to implement fuzzy RD estimation (or Fuzzy Kink RD if {cmd:deriv(1)} is also specified).
79
+ Default is Sharp RD design and hence this option is not used.
80
+ If the option {it:sharpbw} is set, the fuzzy RD estimation is performed using a bandwidth selection procedure for the sharp RD model. This option is automatically selected if there is perfect compliance at either side of the threshold.
81
+ {p_end}
82
+
83
+ {p 4 8}{cmd:deriv(}{it:#}{cmd:)} specifies the order of the derivative of the regression functions to be estimated.
84
+ Default is {cmd:deriv(0)} (for Sharp RD, or for Fuzzy RD if {cmd:fuzzy(.)} is also specified). Setting {cmd:deriv(1)} results in estimation of a Kink RD design (up to scale), or Fuzzy Kink RD if {cmd:fuzzy(.)} is also specified.{p_end}
85
+
86
+ {p 4 8}{cmd:scalepar(}{it:#}{cmd:)} specifies scaling factor for RD parameter of interest. This option is useful when the estimator of interest requires a known multiplicative factor rescaling (e.g., Sharp Kink RD).
87
+ Default is {cmd:scalepar(1)} (no rescaling).{p_end}
88
+
89
+ {dlgtab:Local Polynomial Regression}
90
+
91
+ {p 4 8}{cmd:p(}{it:#}{cmd:)} specifies the order of the local polynomial used to construct the point estimator.
92
+ Default is {cmd:p(1)} (local linear regression).{p_end}
93
+
94
+ {p 4 8}{cmd:q(}{it:#}{cmd:)} specifies the order of the local polynomial used to construct the bias correction.
95
+ Default is {cmd:q(2)} (local quadratic regression).{p_end}
96
+
97
+ {p 4 8}{cmd:h(}{it:# #}{cmd:)} specifies the main bandwidth ({it:h}) used to construct the RD point estimator. If not specified, bandwidth {it:h} is computed by the companion command {help rdbwselect:rdbwselect}.
98
+ If two bandwidths are specified, the first bandwidth is used for the data below the cutoff and the second bandwidth is used for the data above the cutoff.{p_end}
99
+
100
+ {p 4 8}{cmd:b(}{it:# #}{cmd:)} specifies the bias bandwidth ({it:b}) used to construct the bias-correction estimator. If not specified, bandwidth {it:b} is computed by the companion command {help rdbwselect:rdbwselect}.
101
+ If two bandwidths are specified, the first bandwidth is used for the data below the cutoff and the second bandwidth is used for the data above the cutoff.{p_end}
102
+
103
+ {p 4 8}{cmd:rho(}{it:#}{cmd:)} specifies the value of {it:rho}, so that the bias bandwidth {it:b} equals {it:b}={it:h}/{it:rho}.
104
+ Default is {cmd:rho(1)} if {it:h} is specified but {it:b} is not.{p_end}
105
+
106
+ {p 4 8}{cmd:covs(}{it:covars}{cmd:)} specifies additional covariates to be used for estimation and inference.{p_end}
107
+
108
+ {p 4 8}{cmd:covs_drop(}{it:covsdropoption}{cmd:)} assess collinearity in additional covariates used for estimation and inference. Options {opt pinv} (default choice) and {opt invsym} drops collinear additional covariates, differing only in the type of inverse function used. Option {opt off} only checks collinear additional covariates but does not drop them.{p_end}
109
+
110
+ {p 4 8}{cmd:kernel(}{it:kernelfn}{cmd:)} specifies the kernel function used to construct the local-polynomial estimator(s). Options are: {opt tri:angular}, {opt epa:nechnikov}, and {opt uni:form}.
111
+ Default is {cmd:kernel(triangular)}.{p_end}
112
+
113
+ {p 4 8}{cmd:weights(}{it:weightsvar}{cmd:)} is the variable used for optional weighting of the estimation procedure. The unit-specific weights multiply the kernel function.{p_end}
114
+
115
+ {dlgtab:Bandwidth Selection}
116
+
117
+ {p 4 8}{cmd:bwselect(}{it:bwmethod}{cmd:)} specifies the bandwidth selection procedure to be used. By default it computes both {it:h} and {it:b}, unless {it:rho} is specified, in which case it only computes {it:h} and sets {it:b}={it:h}/{it:rho}.
118
+ Options are:{p_end}
119
+ {p 8 12}{opt mserd} one common MSE-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
120
+ {p 8 12}{opt msetwo} two different MSE-optimal bandwidth selectors (below and above the cutoff) for the RD treatment effect estimator.{p_end}
121
+ {p 8 12}{opt msesum} one common MSE-optimal bandwidth selector for the sum of regression estimates (as opposed to difference thereof).{p_end}
122
+ {p 8 12}{opt msecomb1} for min({opt mserd},{opt msesum}).{p_end}
123
+ {p 8 12}{opt msecomb2} for median({opt msetwo},{opt mserd},{opt msesum}), for each side of the cutoff separately.{p_end}
124
+ {p 8 12}{opt cerrd} one common CER-optimal bandwidth selector for the RD treatment effect estimator.{p_end}
125
+ {p 8 12}{opt certwo} two different CER-optimal bandwidth selectors (below and above the cutoff) for the RD treatment effect estimator.{p_end}
126
+ {p 8 12}{opt cersum} one common CER-optimal bandwidth selector for the sum of regression estimates (as opposed to difference thereof).{p_end}
127
+ {p 8 12}{opt cercomb1} for min({opt cerrd},{opt cersum}).{p_end}
128
+ {p 8 12}{opt cercomb2} for median({opt certwo},{opt cerrd},{opt cersum}), for each side of the cutoff separately.{p_end}
129
+ {p 8 12}Note: MSE = Mean Square Error; CER = Coverage Error Rate.{p_end}
130
+ {p 8 12}Default is {cmd:bwselect(mserd)}. For details on implementation see
131
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Calonico, Cattaneo and Titiunik (2014a)},
132
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":Calonico, Cattaneo and Farrell (2018)},
133
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Calonico, Cattaneo and Farrell (2020)},
134
+ and {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Calonico, Cattaneo, Farrell and Titiunik (2019)},
135
+ and the companion software articles.{p_end}
136
+
137
+ {p 4 8}{cmd:scaleregul(}{it:#}{cmd:)} specifies scaling factor for the regularization term added to the denominator of the bandwidth selectors. Setting {cmd:scaleregul(0)} removes the regularization term from the bandwidth selectors.
138
+ Default is {cmd:scaleregul(1)}.{p_end}
139
+
140
+ {p 4 8}{cmd:masspoints(}{it:masspointsoption}{cmd:)} checks and controls for repeated observations in the running variable.
141
+ Options are:{p_end}
142
+ {p 8 12}{opt off} ignores the presence of mass points. {p_end}
143
+ {p 8 12}{opt check} looks for and reports the number of unique observations at each side of the cutoff. {p_end}
144
+ {p 8 12}{opt adjust} controls that the preliminary bandwidths used in the calculations contain a minimal number of unique observations. By default it uses 10 observations, but it can be manually adjusted with the option {cmd:bwcheck}.{p_end}
145
+ {p 8 12} Default option is {cmd:masspoints(adjust)}.{p_end}
146
+
147
+ {p 4 8}{cmd:bwcheck(}{it:bwcheck}{cmd:)} if a positive integer is provided, the preliminary bandwidth used in the calculations is enlarged so that at least {it:bwcheck} unique observations are used. {p_end}
148
+
149
+ {p 4 8}{cmd:bwrestrict(}{it:bwropt}{cmd:)} if set {opt on}, computed bandwidths are restricted to lie within the range of {it:runvar}. Default is {opt on}.{p_end}
150
+
151
+ {p 4 8}{cmd:stdvars(}{it:stdopt}{cmd:)} if set {opt on}, {it:depvar} and {it:runvar} are standardized before computing the bandwidths. Default is {opt off}.{p_end}
152
+
153
+ {dlgtab:Variance-Covariance Estimation}
154
+
155
+ {p 4 8}{cmd:vce(}{it:vcetype [vceopt1 vceopt2]}{cmd:)} specifies the procedure used to compute the variance-covariance matrix estimator.
156
+ Options are:{p_end}
157
+ {p 8 12}{cmd:vce(nn }{it:[nnmatch]}{cmd:)} for heteroskedasticity-robust nearest neighbor variance estimator with {it:nnmatch} indicating the minimum number of neighbors to be used.{p_end}
158
+ {p 8 12}{cmd:vce(hc0)} for heteroskedasticity-robust plug-in residuals variance estimator without weights.{p_end}
159
+ {p 8 12}{cmd:vce(hc1)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc1} weights.{p_end}
160
+ {p 8 12}{cmd:vce(hc2)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc2} weights.{p_end}
161
+ {p 8 12}{cmd:vce(hc3)} for heteroskedasticity-robust plug-in residuals variance estimator with {it:hc3} weights.{p_end}
162
+ {p 8 12}{cmd:vce(nncluster }{it:clustervar [nnmatch]}{cmd:)} for cluster-robust nearest neighbor variance estimation with {it:clustervar} indicating the cluster ID variable and {it:nnmatch} indicating the minimum number of neighbors to be used.{p_end}
163
+ {p 8 12}{cmd:vce(cluster }{it:clustervar}{cmd:)} for cluster-robust plug-in residuals variance estimation with degrees-of-freedom weights and {it:clustervar} indicating the cluster ID variable.{p_end}
164
+ {p 8 12}Default is {cmd:vce(nn 3)}.{p_end}
165
+
166
+ {p 4 8}{cmd:level(}{it:#}{cmd:)} specifies confidence level for confidence intervals.
167
+ Default is {cmd:level(95)}.{p_end}
168
+
169
+ {dlgtab:Other Options}
170
+
171
+ {p 4 8}{cmd:all} if specified, {cmd:rdrobust} reports three different procedures:{p_end}
172
+ {p 8 12} (i) conventional RD estimates with conventional variance estimator.{p_end}
173
+ {p 8 12} (ii) bias-corrected RD estimates with conventional variance estimator.{p_end}
174
+ {p 8 12} (iii) bias-corrected RD estimates with robust variance estimator.{p_end}
175
+
176
+ {hline}
177
+
178
+
179
+ {marker examples}{...}
180
+ {title:Example: Cattaneo, Frandsen and Titiunik (2015) Incumbency Data}
181
+
182
+ {p 4 8}Setup{p_end}
183
+ {p 8 8}{cmd:. use rdrobust_senate.dta}{p_end}
184
+
185
+ {p 4 8}Robust RD Estimation using MSE bandwidth selection procedure{p_end}
186
+ {p 8 8}{cmd:. rdrobust vote margin}{p_end}
187
+
188
+ {p 4 8}Robust RD Estimation with both bandwidths set to 15{p_end}
189
+ {p 8 8}{cmd:. rdrobust vote margin, h(15)}{p_end}
190
+
191
+ {p 4 8}Other generic examples ({cmd:y} outcome variable, {cmd:x} running variable, {cmd:t} treatment take-up indicator):{p_end}
192
+
193
+ {p 8 8}Estimation for Sharp RD designs{p_end}
194
+ {p 12 12}{cmd:. rdrobust y x, deriv(0)}{p_end}
195
+
196
+ {p 8 8}Estimation for Sharp Kink RD designs{p_end}
197
+ {p 12 12}{cmd:. rdrobust y x, deriv(1)}{p_end}
198
+
199
+ {p 8 8}Estimation for Fuzzy RD designs{p_end}
200
+ {p 12 12}{cmd:. rdrobust y x, fuzzy(t)}{p_end}
201
+
202
+ {p 8 8}Estimation for Fuzzy Kink RD designs{p_end}
203
+ {p 12 12}{cmd:. rdrobust y x, fuzzy(t) deriv(1)}{p_end}
204
+
205
+
206
+ {marker stored_results}{...}
207
+ {title:Stored results}
208
+
209
+ {p 4 8}{cmd:rdrobust} stores the following in {cmd:e()}:
210
+
211
+ {synoptset 20 tabbed}{...}
212
+ {p2col 5 20 24 2: Scalars}{p_end}
213
+ {synopt:{cmd:e(N)}}original number of observations{p_end}
214
+ {synopt:{cmd:e(N_l)}}original number of observations to the left of the cutoff{p_end}
215
+ {synopt:{cmd:e(N_r)}}original number of observations to the right of the cutoff{p_end}
216
+ {synopt:{cmd:e(N_h_l)}}effective number of observations (given by the bandwidth h_l) used to the left of the cutoff{p_end}
217
+ {synopt:{cmd:e(N_h_r)}}effective number of observations (given by the bandwidth h_r) used to the right of the cutoff{p_end}
218
+ {synopt:{cmd:e(N_b_l)}}effective number of observations (given by the bandwidth b_l) used to the left of the cutoff{p_end}
219
+ {synopt:{cmd:e(N_b_r)}}effective number of observations (given by the bandwidth b_r) used to the right of the cutoff{p_end}
220
+ {synopt:{cmd:e(c)}}cutoff value{p_end}
221
+ {synopt:{cmd:e(p)}}order of the polynomial used for estimation of the regression function{p_end}
222
+ {synopt:{cmd:e(q)}}order of the polynomial used for estimation of the bias of the regression function estimator{p_end}
223
+ {synopt:{cmd:e(h_l)}}bandwidth used for estimation of the regression function below the cutoff{p_end}
224
+ {synopt:{cmd:e(h_r)}}bandwidth used for estimation of the regression function above the cutoff{p_end}
225
+ {synopt:{cmd:e(b_l)}}bandwidth used for estimation of the bias of the regression function estimator below the cutoff{p_end}
226
+ {synopt:{cmd:e(b_r)}}bandwidth used for estimation of the bias of the regression function estimator above the cutoff{p_end}
227
+ {synopt:{cmd:e(tau_cl)}}conventional local-polynomial RD estimate{p_end}
228
+ {synopt:{cmd:e(tau_cl_l)}}conventional local-polynomial left estimate{p_end}
229
+ {synopt:{cmd:e(tau_cl_r)}}conventional local-polynomial right estimate{p_end}
230
+ {synopt:{cmd:e(tau_bc)}}bias-corrected local-polynomial RD estimate{p_end}
231
+ {synopt:{cmd:e(tau_bc_l)}}bias-corrected local-polynomial left estimate{p_end}
232
+ {synopt:{cmd:e(tau_bc_r)}}bias-corrected local-polynomial right estimate{p_end}
233
+ {synopt:{cmd:e(se_tau_cl)}}conventional standard error of the local-polynomial RD estimator{p_end}
234
+ {synopt:{cmd:e(se_tau_rb)}}robust standard error of the local-polynomial RD estimator{p_end}
235
+ {synopt:{cmd:e(bias_l)}}estimated bias for the local-polynomial RD estimator below the cutoff{p_end}
236
+ {synopt:{cmd:e(bias_r)}}estimated bias for the local-polynomial RD estimator above the cutoff{p_end}
237
+
238
+ {p2col 5 20 24 2: Macros}{p_end}
239
+ {synopt:{cmd:e(runningvar)}}name of running variable{p_end}
240
+ {synopt:{cmd:e(outcomevar)}}name of outcome variable{p_end}
241
+ {synopt:{cmd:e(clustvar)}}name of cluster variable{p_end}
242
+ {synopt:{cmd:e(covs)}}name of covariates{p_end}
243
+ {synopt:{cmd:e(vce_select)}}vcetype specified in vce(){p_end}
244
+ {synopt:{cmd:e(bwselect)}}bandwidth selection choice{p_end}
245
+ {synopt:{cmd:e(kernel)}}kernel choice{p_end}
246
+
247
+ {p2col 5 20 24 2: Matrices}{p_end}
248
+ {synopt:{cmd:e(beta_p_r)}}conventional p-order local-polynomial estimates to the right of the cutoff{p_end}
249
+ {synopt:{cmd:e(beta_p_l)}}conventional p-order local-polynomial estimates to the left of the cutoff{p_end}
250
+ {synopt:{cmd:e(V_cl_r)}}conventional variance-covariance matrix to the right of the cutoff{p_end}
251
+ {synopt:{cmd:e(V_cl_l)}}conventional variance-covariance matrix to the left of the cutoff{p_end}
252
+ {synopt:{cmd:e(V_rb_r)}}robust variance-covariance matrix to the right of the cutoff{p_end}
253
+ {synopt:{cmd:e(V_rb_l)}}robust variance-covariance matrix to the left of the cutoff{p_end}
254
+
255
+ {marker references}{...}
256
+ {title:References}
257
+
258
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2020.
259
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2020_ECTJ.pdf":Optimal Bandwidth Choice for Robust Bias Corrected Inference in Regression Discontinuity Designs}.
260
+ {it:Econometrics Journal} 23(2): 192-210.{p_end}
261
+
262
+ {p 4 8}Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2018.
263
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf":On the Effect of Bias Estimation on Coverage Accuracy in Nonparametric Inference}.
264
+ {it:Journal of the American Statistical Association} 113(522): 767-779.{p_end}
265
+
266
+ {p 4 8}Calonico, S., M. D. Cattaneo, M. H. Farrell, and R. Titiunik. 2019.
267
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2019_RESTAT.pdf":Regression Discontinuity Designs using Covariates}.
268
+ {it:Review of Economics and Statistics}, 101(3): 442-451.{p_end}
269
+
270
+ {p 4 8}Calonico, S., M. D. Cattaneo, M. H. Farrell, and R. Titiunik. 2017.
271
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Farrell-Titiunik_2017_Stata.pdf":rdrobust: Software for Regression Discontinuity Designs}.
272
+ {it:Stata Journal} 17(2): 372-404.{p_end}
273
+
274
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2014a.
275
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_ECMA.pdf":Robust Nonparametric Confidence Intervals for Regression-Discontinuity Designs}.
276
+ {it:Econometrica} 82(6): 2295-2326.{p_end}
277
+
278
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2014b.
279
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2014_Stata.pdf":Robust Data-Driven Inference in the Regression-Discontinuity Design}.
280
+ {it:Stata Journal} 14(4): 909-946.{p_end}
281
+
282
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015a.
283
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_JASA.pdf":Optimal Data-Driven Regression Discontinuity Plots}.
284
+ {it:Journal of the American Statistical Association} 110(512): 1753-1769.{p_end}
285
+
286
+ {p 4 8}Calonico, S., M. D. Cattaneo, and R. Titiunik. 2015b.
287
+ {browse "https://rdpackages.github.io/references/Calonico-Cattaneo-Titiunik_2015_R.pdf":rdrobust: An R Package for Robust Nonparametric Inference in Regression-Discontinuity Designs}.
288
+ {it:R Journal} 7(1): 38-51.{p_end}
289
+
290
+ {p 4 8}Cattaneo, M. D., B. Frandsen, and R. Titiunik. 2015.
291
+ {browse "https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf":Randomization Inference in the Regression Discontinuity Design: An Application to Party Advantages in the U.S. Senate}.
292
+ {it:Journal of Causal Inference} 3(1): 1-24.{p_end}
293
+
294
+ {marker authors}{...}
295
+ {title:Authors}
296
+
297
+ {p 4 8}Sebastian Calonico, Columbia University, New York, NY.
298
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
299
+
300
+ {p 4 8}Matias D. Cattaneo, Princeton University, Princeton, NJ.
301
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
302
+
303
+ {p 4 8}Max H. Farrell, University of Chicago, Chicago, IL.
304
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
305
+
306
+ {p 4 8}Rocio Titiunik, Princeton University, Princeton, NJ.
307
+ {browse "mailto:[email protected]":[email protected]}.{p_end}
308
+
309
+
30/replication_package/Adofiles/rd_2021/rdrobust_bw.mo ADDED
Binary file (15.8 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdrobust_kweight.mo ADDED
Binary file (2.8 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdrobust_res.mo ADDED
Binary file (7.95 kB). View file
 
30/replication_package/Adofiles/rd_2021/rdrobust_vce.mo ADDED
Binary file (5.31 kB). View file
 
30/replication_package/Adofiles/reghdfe_2019/reghdfe.ado ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *! version 5.7.3 13nov2019
2
+
3
+ program reghdfe, eclass
4
+ * Intercept old+version
5
+ cap syntax, version old
6
+ if !c(rc) {
7
+ reghdfe_old, version
8
+ exit
9
+ }
10
+
11
+ * Intercept old
12
+ cap syntax anything(everything) [fw aw pw/], [*] old
13
+ if !c(rc) {
14
+ di as error "(running historical version of reghdfe)"
15
+ if ("`weight'"!="") local weightexp [`weight'=`exp']
16
+ reghdfe_old `anything' `weightexp', `options'
17
+ exit
18
+ }
19
+
20
+ * Aux. subcommands
21
+ cap syntax, [*]
22
+ if inlist("`options'", "check", "compile", "reload", "update", "version", "requirements", "store_alphas") {
23
+ if ("`options'"=="compile") loc args force
24
+ if ("`options'"=="check") loc options compile
25
+ if ("`options'"=="update") {
26
+ loc args 1
27
+ loc options reload
28
+ }
29
+ loc subcmd = proper("`options'")
30
+ `subcmd' `args'
31
+ }
32
+ else if replay() {
33
+ Replay `0'
34
+ }
35
+ else {
36
+ Cleanup 0
37
+ ms_get_version ftools, min_version("2.36.1") // Compile // takes 0.01s to run this useful check (ensures .mlib exists)
38
+ cap noi Estimate `0'
39
+ Cleanup `c(rc)'
40
+ }
41
+ end
42
+
43
+
44
+ program Compile
45
+ args force
46
+
47
+ * Check dependencies
48
+ ftools, check // in case lftools.mlib does not exist or is outdated
49
+ ms_get_version ftools, min_version("2.34.0")
50
+ ms_get_version reghdfe // save local package_version
51
+ loc list_objects "FixedEffects() fixed_effects() BipartiteGraph()"
52
+ loc list_functions "reghdfe_*() transform_*() accelerate_*() panelmean() panelsolve_*() lsmr()"
53
+ loc list_misc "weighted_quadcolsum() safe_divide() check_convergence() precompute_inv_xx() _st_data_wrapper()"
54
+ // TODO: prefix everything with reghdfe_*
55
+
56
+ ms_compile_mata, ///
57
+ package(reghdfe) ///
58
+ version(`package_version') ///
59
+ fun("`list_objects' `list_functions' `list_misc'") ///
60
+ verbose ///
61
+ `force'
62
+ end
63
+
64
+
65
+ program Reload
66
+ * Internal debugging tool.
67
+ * Updates dependencies and reghdfe from local path or from github
68
+ * Usage:
69
+ * reghdfe, update // from c:\git\..
70
+ * reghdfe, reload // from github
71
+
72
+ args online
73
+ if ("`online'" == "") loc online 0
74
+
75
+ di as text _n "{bf:reghdfe: updating required packages}"
76
+ di as text "{hline 64}"
77
+
78
+ * -ftools- https://github.com/sergiocorreia/ftools/
79
+ cap ado uninstall ftools
80
+ if (`online') net install ftools, from("https://github.com/sergiocorreia/ftools/raw/master/src/")
81
+ if (!`online') net install ftools, from("c:\git\ftools\src")
82
+ di as text "{hline 64}"
83
+ ftools, compile
84
+ di as text "{hline 64}"
85
+
86
+ * Update -reghdfe-
87
+ di as text _n _n "{bf:reghdfe: updating self}"
88
+ di as text "{hline 64}"
89
+ qui ado uninstall reghdfe
90
+ if (`online') net install reghdfe, from("https://github.com/sergiocorreia/reghdfe/raw/master/src/")
91
+ if (!`online') net install reghdfe, from("c:\git\reghdfe\src")
92
+ qui which reghdfe
93
+ di as text "{hline 64}"
94
+ reghdfe, compile
95
+ di as text "{hline 64}"
96
+
97
+ * Cleaning up
98
+ di as text _n "{bf:Note:} You need to run {stata program drop _all} now."
99
+ end
100
+
101
+
102
+ program Version
103
+ which reghdfe
104
+ Requirements
105
+ end
106
+
107
+
108
+ program Requirements
109
+ di as text _n "Required packages installed?"
110
+ loc reqs ftools
111
+ // ivreg2 avar tuples group3hdfe
112
+ if (c(stata_version)<13) loc reqs `reqs' boottest
113
+
114
+ loc ftools_github "https://github.com/sergiocorreia/ftools/raw/master/src/"
115
+
116
+ loc error 0
117
+
118
+ foreach req of local reqs {
119
+ loc fn `req'.ado
120
+ cap findfile `fn'
121
+ if (_rc) {
122
+ loc error 1
123
+ di as text "{lalign 20:- `req'}" as error "not" _c
124
+ di as text " {stata ssc install `req':install from SSC}" _c
125
+ if inlist("`req'", "ftools") {
126
+ loc github ``req'_github'
127
+ di as text `" {stata `"net install `req', from(`"`github'"')"':install from github}"'
128
+ }
129
+ else {
130
+ di as text // newline
131
+ }
132
+ }
133
+ else {
134
+ di as text "{lalign 20:- `req'}" as text "yes"
135
+ }
136
+ }
137
+
138
+ if (`error') exit 601
139
+ end
140
+
141
+
142
+ program Store_Alphas, eclass
143
+ mata: st_local("save_any_fe", strofreal(HDFE.save_any_fe))
144
+ assert inlist(`save_any_fe', 0, 1)
145
+ if (`save_any_fe') {
146
+ _assert e(depvar) != "", msg("e(depvar) is empty")
147
+ _assert e(resid) != "", msg("e(resid) is empty")
148
+ // we can't use -confirm var- because it might have TS operators
149
+ fvrevar `e(depvar)', list
150
+ confirm numeric var `e(resid)', exact
151
+ tempvar d
152
+ if (e(rank)) {
153
+ qui _predict double `d' if e(sample), xb
154
+ }
155
+ else if (e(report_constant)) {
156
+ gen double `d' = _b[_cons] if e(sample)
157
+ }
158
+ else {
159
+ gen double `d' = 0 if e(sample)
160
+ }
161
+ qui replace `d' = `e(depvar)' - `d' - `e(resid)' if e(sample)
162
+
163
+ mata: HDFE.store_alphas("`d'")
164
+ drop `d'
165
+
166
+ // Drop resid if we don't want to save it; and update e(resid)
167
+ cap drop __temp_reghdfe_resid__
168
+ if (!c(rc)) ereturn local resid
169
+ }
170
+ end
171
+
172
+
173
+ program Cleanup
174
+ args rc
175
+ cap mata: mata drop HDFE
176
+ cap mata: mata drop hdfe_*
177
+ cap drop __temp_reghdfe_resid__
178
+ cap matrix drop reghdfe_statsmatrix
179
+ if (`rc' == 132) {
180
+ di as text "- If you got the {it:parentheses unbalanced} error, note that IV/2SLS was moved to {help ivreghdfe}"
181
+ di as smcl `"- Latest version: {browse "https://github.com/sergiocorreia/ivreghdfe":https://github.com/sergiocorreia/ivreghdfe}"'
182
+ di as smcl `"- SSC version: {stata "net describe ivreghdfe, from(http://fmwww.bc.edu/RePEc/bocode/i)"}"'
183
+ di as smcl `"- Note: the older functionality can still be accessed through the {it:old} option"'
184
+ }
185
+ if (`rc') exit `rc'
186
+ end
187
+
188
+
189
+ program Parse
190
+ * Trim whitespace (caused by "///" line continuations; aesthetic only)
191
+ mata: st_local("0", stritrim(st_local("0")))
192
+
193
+ * Main syntax
194
+ #d;
195
+ syntax varlist(fv ts numeric) [if] [in] [aw pw fw/] , [
196
+
197
+ /* Model */
198
+ Absorb(string) NOAbsorb
199
+ SUmmarize SUmmarize2(string asis) /* simulate implicit options */
200
+
201
+ /* Standard Errors */
202
+ VCE(string) CLuster(string)
203
+
204
+ /* Diagnostic */
205
+ Verbose(numlist min=1 max=1 >=-1 <=5 integer)
206
+ TIMEit
207
+
208
+ /* Speedup and memory Tricks */
209
+ NOSAMPle /* do not save e(sample) */
210
+ COMPACT /* use as little memory as possible but is slower */
211
+
212
+ /* Extra display options (based on regress) */
213
+ noHEader noTABle noFOOTnote
214
+
215
+ /* Undocumented */
216
+ KEEPSINgletons
217
+ OLD /* use latest v3 */
218
+ NOTES(string) /* NOTES(key=value ...), will be stored on e() */
219
+
220
+ ] [*] /* capture optimization options, display options, etc. */
221
+ ;
222
+ #d cr
223
+
224
+ * Unused
225
+ * SAVEcache
226
+ * USEcache
227
+ * CLEARcache
228
+
229
+ * Convert options to boolean
230
+ if ("`verbose'" == "") loc verbose 0
231
+ loc timeit = ("`timeit'"!="")
232
+ loc drop_singletons = ("`keepsingletons'" == "")
233
+ loc compact = ("`compact'" != "")
234
+
235
+ if (`timeit') timer on 29
236
+
237
+ * Sanity checks
238
+ if (`verbose'>-1 & "`keepsingletons'"!="") {
239
+ loc url "http://scorreia.com/reghdfe/nested_within_cluster.pdf"
240
+ loc msg "WARNING: Singleton observations not dropped; statistical significance is biased"
241
+ di as error `"`msg' {browse "`url'":(link)}"'
242
+ }
243
+ if ("`cluster'"!="") {
244
+ _assert ("`vce'"==""), msg("cannot specify both cluster() and vce()")
245
+ loc vce cluster `cluster'
246
+ loc cluster // clear it to avoid bugs in subsequent lines
247
+ }
248
+
249
+ * Split varlist into <depvar> and <indepvars>
250
+ ms_parse_varlist `varlist'
251
+ if (`verbose' > 0) {
252
+ di as text _n "## Parsing varlist: {res}`varlist'"
253
+ return list
254
+ }
255
+ loc depvar `r(depvar)'
256
+ loc indepvars `r(indepvars)'
257
+ loc fe_format "`r(fe_format)'"
258
+ loc basevars `r(basevars)'
259
+
260
+ * Parse Weights
261
+ if ("`weight'"!="") {
262
+ unab exp : `exp', min(1) max(1) // simple weights only
263
+ }
264
+
265
+ * Parse VCE
266
+ ms_parse_vce, vce(`vce') weighttype(`weight')
267
+ if (`verbose' > 0) {
268
+ di as text _n "## Parsing vce({res}`vce'{txt})"
269
+ sreturn list
270
+ }
271
+ loc vcetype = "`s(vcetype)'"
272
+ loc num_clusters = `s(num_clusters)'
273
+ loc clustervars = "`s(clustervars)'"
274
+ loc base_clustervars = "`s(base_clustervars)'"
275
+ loc vceextra = "`s(vceextra)'"
276
+
277
+ * Select sample (except for absvars)
278
+ loc varlist `depvar' `indepvars' `base_clustervars'
279
+ tempvar touse
280
+ marksample touse, strok // based on varlist + cluster + if + in + weight
281
+
282
+ * Parse noabsorb
283
+ _assert ("`absorb'`noabsorb'" != ""), msg("option {bf:absorb()} or {bf:noabsorb} required")
284
+ if ("`noabsorb'" != "") {
285
+ _assert ("`absorb'" == ""), msg("{bf:absorb()} and {bf:noabsorb} are mutually exclusive")
286
+ }
287
+
288
+ if (`timeit') timer off 29
289
+
290
+ * Construct HDFE object
291
+ // SYNTAX: fixed_effects(absvars | , touse, wtype, wtvar, dropsing, verbose)
292
+ ms_add_comma, loc(absorb) cmd(`"`absorb'"') opt(`"`options'"')
293
+ if (`timeit') timer on 20
294
+ mata: HDFE = fixed_effects(`"`absorb'"', "`touse'", "`weight'", "`exp'", `drop_singletons', `verbose')
295
+ if (`timeit') timer off 20
296
+ mata: HDFE.cmdline = "reghdfe " + st_local("0")
297
+ loc options `s(options)'
298
+
299
+ mata: st_local("N", strofreal(HDFE.N))
300
+ if (`N' == 0) error 2000
301
+
302
+ * Fill out HDFE object
303
+ * mata: HDFE.varlist = "`base_varlist'"
304
+ mata: HDFE.depvar = "`depvar'"
305
+ mata: HDFE.indepvars = "`indepvars'"
306
+ mata: HDFE.vcetype = "`vcetype'"
307
+ mata: HDFE.num_clusters = `num_clusters'
308
+ mata: HDFE.clustervars = tokens("`clustervars'")
309
+ mata: HDFE.base_clustervars = tokens("`base_clustervars'")
310
+ mata: HDFE.vceextra = "`vceextra'"
311
+
312
+ * Preserve memory
313
+ mata: HDFE.compact = `compact'
314
+ if (`compact') {
315
+ loc panelvar "`_dta[_TSpanel]'"
316
+ loc timevar "`_dta[_TStvar]'"
317
+
318
+ cap conf var `panelvar', exact
319
+ if (c(rc)) loc panelvar
320
+
321
+ cap conf var `timevar', exact
322
+ if (c(rc)) loc timevar
323
+
324
+ mata: HDFE.panelvar = "`panelvar'"
325
+ mata: HDFE.timevar = "`timevar'"
326
+ c_local keepvars `basevars' `base_clustervars' `panelvar' `timevar' // `exp'
327
+ }
328
+
329
+ * Parse summarize
330
+ if ("`summarize'" != "") {
331
+ _assert ("`summarize2'" == ""), msg("summarize() syntax error")
332
+ loc summarize2 mean min max // default values
333
+ }
334
+ ParseSummarize `summarize2'
335
+ mata: HDFE.summarize_stats = "`s(stats)'"
336
+ mata: HDFE.summarize_quietly = `s(quietly)'
337
+
338
+
339
+ * Parse misc options
340
+ mata: HDFE.notes = `"`notes'"'
341
+ mata: HDFE.store_sample = ("`nosample'"=="")
342
+ mata: HDFE.timeit = `timeit'
343
+
344
+
345
+ * Parse Coef Table Options (do this last!)
346
+ _get_diopts diopts options, `options' // store in `diopts', and the rest back to `options'
347
+ loc diopts `diopts' `header' `table' `footnote'
348
+ _assert (`"`options'"'==""), msg(`"invalid options: `options'"')
349
+ if ("`hascons'"!="") di in ye "(option ignored: `hascons')"
350
+ if ("`tsscons'"!="") di in ye "(option ignored: `tsscons')"
351
+ mata: HDFE.diopts = `"`diopts'"'
352
+ end
353
+
354
+
355
+ program ParseSummarize, sclass
356
+ sreturn clear
357
+ syntax [namelist(name=stats)] , [QUIetly]
358
+ local quietly = ("`quietly'"!="")
359
+ sreturn loc stats "`stats'"
360
+ sreturn loc quietly = `quietly'
361
+ end
362
+
363
+ // --------------------------------------------------------------------------
364
+
365
+ program Estimate, eclass
366
+ ereturn clear
367
+
368
+ * Parse and fill out HDFE object
369
+ Parse `0'
370
+ mata: st_local("timeit", strofreal(HDFE.timeit))
371
+ mata: st_local("compact", strofreal(HDFE.compact))
372
+ mata: st_local("verbose", strofreal(HDFE.verbose))
373
+
374
+ * Compute degrees-of-freedom
375
+ if (`timeit') timer on 21
376
+ mata: HDFE.estimate_dof()
377
+ if (`timeit') timer off 21
378
+
379
+ * Save updated e(sample) (singletons reduce sample);
380
+ * required to parse factor variables to partial out
381
+ if (`timeit') timer on 29
382
+ tempvar touse
383
+ mata: HDFE.save_touse("`touse'")
384
+ if (`timeit') timer off 29
385
+
386
+ * Expand varlists
387
+ if (`timeit') timer on 22
388
+ mata: st_local("depvar", HDFE.depvar)
389
+ mata: st_local("indepvars", HDFE.indepvars)
390
+ if (`verbose' > 0) di as text _n "## Parsing and expanding indepvars: {res}`indepvars'"
391
+ ms_expand_varlist `indepvars' if `touse'
392
+ if (`verbose' > 0) return list
393
+ mata: HDFE.fullindepvars = "`r(fullvarlist)'"
394
+ mata: HDFE.indepvars = "`r(varlist)'"
395
+ mata: HDFE.not_basevar = strtoreal(tokens("`r(not_omitted)'"))
396
+ mata: HDFE.varlist = "`depvar' `r(varlist)'"
397
+ if (`timeit') timer off 22
398
+
399
+ * Stats
400
+ mata: st_local("stats", HDFE.summarize_stats)
401
+ if ("`stats'" != "") Stats `touse'
402
+
403
+ * Condition number
404
+ mata: HDFE.estimate_cond()
405
+
406
+ * Preserve
407
+ if (`compact') {
408
+ if (`verbose' > 0) di as text "## Preserving dataset"
409
+ preserve
410
+ novarabbrev keep `keepvars'
411
+ }
412
+
413
+ * Partial out; save TSS of depvar
414
+ if (`timeit') timer on 23
415
+ // SYNTAX: partial_out(Varlist/Matrix | , Save TSS if HDFE.tss is missing? [0], Standardize data? [1], First col is depvar? [1])
416
+ // Note: standardize=2 will standardize, partial out, and return the data standardized!
417
+ mata: hdfe_variables = HDFE.partial_out(HDFE.varlist, 1, 2, .)
418
+ if (`timeit') timer off 23
419
+
420
+ * Regress
421
+ if (`timeit') timer on 24
422
+ tempname b V N rank df_r
423
+ mata: reghdfe_post_ols(HDFE, hdfe_variables, "`b'", "`V'", "`N'", "`rank'", "`df_r'")
424
+ mata: hdfe_variables = .
425
+ * Restore
426
+ if (`compact') {
427
+ if (`verbose' > 0) di as text "## Restoring dataset"
428
+ restore
429
+ mata: st_local("residuals", HDFE.residuals)
430
+ if ("`residuals'" != "") mata: HDFE.save_variable(HDFE.residuals, HDFE.residuals_vector, "Residuals")
431
+ }
432
+ RegressOLS `touse' `b' `V' `N' `rank' `df_r'
433
+ if (`timeit') timer off 24
434
+
435
+ * (optional) Store FEs
436
+ if (`timeit') timer on 29
437
+ reghdfe, store_alphas
438
+ if (`timeit') timer off 29
439
+
440
+ * View estimation tables
441
+ mata: st_local("diopts", HDFE.diopts)
442
+ Replay, `diopts'
443
+
444
+ if (`timeit') {
445
+ di as text _n "{bf: Timer results:}"
446
+ timer list
447
+ di as text "Legend: 20: Create HDFE object; 21: Estimate DoF; 22: expand varlists; 23: partial out; 24: regress; 29: rest"
448
+ di
449
+ }
450
+ end
451
+
452
+
453
+ program RegressOLS, eclass
454
+ args touse b V N rank df_r
455
+
456
+ mata: st_local("store_sample", strofreal(HDFE.store_sample))
457
+ if (`store_sample') loc esample "esample(`touse')"
458
+
459
+ mata: st_local("indepvars", HDFE.fullindepvars)
460
+ if ("`indepvars'" != "") {
461
+ matrix colnames `b' = `indepvars'
462
+ matrix colnames `V' = `indepvars'
463
+ matrix rownames `V' = `indepvars'
464
+ _ms_findomitted `b' `V'
465
+ ereturn post `b' `V', `esample' buildfvinfo depname(`depvar')
466
+ }
467
+ else {
468
+ ereturn post, `esample' buildfvinfo depname(`depvar')
469
+ }
470
+
471
+ ereturn scalar N = `N'
472
+ ereturn scalar rank = `rank'
473
+ ereturn scalar df_r = `df_r'
474
+ ereturn local cmd "reghdfe"
475
+ mata: HDFE.post()
476
+
477
+ * Post stats
478
+ cap conf matrix reghdfe_statsmatrix
479
+ if (!c(rc)) {
480
+ ereturn matrix summarize = reghdfe_statsmatrix
481
+ mata: st_local("summarize_quietly", strofreal(HDFE.summarize_quietly))
482
+ ereturn scalar summarize_quietly = `summarize_quietly'
483
+ }
484
+ end
485
+
486
+
487
+ program Replay, rclass
488
+ syntax [, noHEader noTABle noFOOTnote *]
489
+
490
+ if `"`e(cmd)'"' != "reghdfe" {
491
+ error 301
492
+ }
493
+
494
+ _get_diopts options, `options'
495
+ if ("`header'" == "") {
496
+ reghdfe_header // _coef_table_header
497
+ di ""
498
+ }
499
+ if ("`table'" == "") {
500
+ _coef_table, `options' // ereturn display, `options'
501
+ return add // adds r(level), r(table), etc. to ereturn (before the footnote deletes them)
502
+ }
503
+ if ("`footnote'" == "") {
504
+ reghdfe_footnote
505
+ }
506
+
507
+ * Replay stats
508
+ if (e(summarize_quietly)==0) {
509
+ di as text _n "{sf:Regression Summary Statistics:}" _c
510
+ matlist e(summarize)', border(top bottom) rowtitle(Variable) // twidth(18)
511
+ }
512
+ end
513
+
514
+
515
+ program Stats
516
+ args touse
517
+ * Optional weights
518
+ mata: st_local("weight", sprintf("[%s=%s]", HDFE.weight_type, HDFE.weight_var))
519
+ assert "`weight'" != ""
520
+ if ("`weight'" == "[=]") loc weight
521
+ loc weight : subinstr local weight "[pweight" "[aweight"
522
+
523
+ mata: st_local("stats", HDFE.summarize_stats)
524
+ mata: st_local("varlist", HDFE.varlist)
525
+ mata: st_local("cvars", invtokens(HDFE.cvars))
526
+ loc full_varlist `varlist' `cvars'
527
+
528
+ * quick workaround b/c -tabstat- does not support factor variables
529
+ fvrevar `full_varlist', list
530
+ loc full_varlist `r(varlist)'
531
+
532
+ qui tabstat `full_varlist' if `touse' `weight' , stat(`stats') col(stat) save
533
+ matrix reghdfe_statsmatrix = r(StatTotal)
534
+ end
535
+
536
+ findfile "reghdfe.mata"
537
+ include "`r(fn)'"
538
+
539
+ exit
30/replication_package/Adofiles/reghdfe_2019/reghdfe.mata ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // --------------------------------------------------------------------------
2
+ // Mata Code: FE Estimator (REGHDFE)
3
+ // --------------------------------------------------------------------------
4
+ // - Project URL: https://github.com/sergiocorreia/reghdfe
5
+ // - Dependency: https://github.com/sergiocorreia/ftools
6
+
7
+ *mata: mata clear
8
+ *mata: mata set matastrict on
9
+ mata: mata set mataoptimize on
10
+ *mata: mata set matadebug off
11
+ *mata: mata set matalnum off
12
+
13
+ // Include ftools -----------------------------------------------------------
14
+ cap findfile "ftools.mata"
15
+ if (_rc) {
16
+ di as error "reghdfe requires the {bf:ftools} package, which is not installed"
17
+ di as error `" - install from {stata ssc install ftools:SSC}"'
18
+ di as error `" - install from {stata `"net install ftools, from("https://github.com/sergiocorreia/ftools/raw/master/src/")"':Github}"'
19
+ exit 9
20
+ }
21
+ include "`r(fn)'"
22
+
23
+
24
+ // Custom types -------------------------------------------------------------
25
+ loc FixedEffects class FixedEffects scalar
26
+ loc Factors class Factor rowvector
27
+ loc BipartiteGraph class BipartiteGraph scalar
28
+ loc FactorPointer pointer(`Factor') scalar
29
+
30
+
31
+ // Versioning ---------------------------------------------------------------
32
+ ms_get_version reghdfe // from parsetools package
33
+ assert("`package_version'" != "")
34
+ mata: string scalar reghdfe_version() return("`package_version'")
35
+ mata: string scalar reghdfe_stata_version() return("`c(stata_version)'")
36
+ mata: string scalar reghdfe_joint_version() return("`package_version'|`c(stata_version)'")
37
+
38
+
39
+ // Includes -----------------------------------------------------------------
40
+ findfile "reghdfe_bipartite.mata"
41
+ include "`r(fn)'"
42
+
43
+ findfile "reghdfe_class.mata"
44
+ include "`r(fn)'"
45
+
46
+ findfile "reghdfe_constructor.mata"
47
+ include "`r(fn)'"
48
+
49
+ findfile "reghdfe_common.mata"
50
+ include "`r(fn)'"
51
+
52
+ findfile "reghdfe_projections.mata"
53
+ include "`r(fn)'"
54
+
55
+ findfile "reghdfe_transforms.mata"
56
+ include "`r(fn)'"
57
+
58
+ findfile "reghdfe_accelerations.mata"
59
+ include "`r(fn)'"
60
+
61
+ findfile "reghdfe_lsmr.mata"
62
+ include "`r(fn)'"
30/replication_package/Adofiles/reghdfe_2019/reghdfe.sthlp ADDED
@@ -0,0 +1,801 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *! version 5.7.3 13nov2019}{...}
3
+ {vieweralsosee "[R] areg" "help areg"}{...}
4
+ {vieweralsosee "[R] xtreg" "help xtreg"}{...}
5
+ {vieweralsosee "[R] ivregress" "help ivregress"}{...}
6
+ {vieweralsosee "" "--"}{...}
7
+ {vieweralsosee "reghdfe_mata" "help reghdfe_mata"}{...}
8
+ {vieweralsosee "ivreghdfe" "help ivreghdfe"}{...}
9
+ {vieweralsosee "ppmlhdfe" "help ppmlhdfe"}{...}
10
+ {vieweralsosee "ivreg2" "help ivreg2"}{...}
11
+ {vieweralsosee "ftools" "help ftools"}{...}
12
+ {vieweralsosee "" "--"}{...}
13
+ {vieweralsosee "ivregress" "help ivregress"}{...}
14
+ {vieweralsosee "reg2hdfe" "help reg2hdfe"}{...}
15
+ {vieweralsosee "a2reg" "help a2reg"}{...}
16
+ {viewerjumpto "Syntax" "reghdfe##syntax"}{...}
17
+ {viewerjumpto "description" "reghdfe##description"}{...}
18
+ {viewerjumpto "Options" "reghdfe##options"}{...}
19
+ {viewerjumpto "Postestimation Syntax" "reghdfe##postestimation"}{...}
20
+ {viewerjumpto "Remarks" "reghdfe##remarks"}{...}
21
+ {viewerjumpto "Examples" "reghdfe##examples"}{...}
22
+ {viewerjumpto "Stored results" "reghdfe##results"}{...}
23
+ {viewerjumpto "Author" "reghdfe##contact"}{...}
24
+ {viewerjumpto "Updates" "reghdfe##updates"}{...}
25
+ {viewerjumpto "Acknowledgements" "reghdfe##acknowledgements"}{...}
26
+ {viewerjumpto "References" "reghdfe##references"}{...}
27
+ {title:Title}
28
+
29
+ {p2colset 5 18 20 2}{...}
30
+ {p2col :{cmd:reghdfe} {hline 2}}Linear regression absorbing multiple levels of fixed effects{p_end}
31
+ {p2colreset}{...}
32
+
33
+ {marker syntax}{...}
34
+ {title:Syntax}
35
+
36
+ {p 8 15 2} {cmd:reghdfe}
37
+ {depvar} [{indepvars}]
38
+ {ifin} {it:{weight}} {cmd:,} {opth a:bsorb(reghdfe##absvar:absvars)} [{help reghdfe##options:options}] {p_end}
39
+
40
+ {marker opt_summary}{...}
41
+ {synoptset 22 tabbed}{...}
42
+ {synopthdr}
43
+ {synoptline}
44
+ {syntab:Model {help reghdfe##opt_model:[+]}}
45
+ {p2coldent:* {opth a:bsorb(reghdfe##absvar:absvars)}}categorical variables that identify the fixed effects to be absorbed{p_end}
46
+ {synopt: {cmdab:a:bsorb(}{it:...}{cmd:,} {cmdab:save:fe)}}save all fixed effect estimates with the {it:__hdfe*} prefix{p_end}
47
+ {synopt: {cmdab:noa:bsorb}}only absorb the constant; alternative to
48
+ {cmd:regress} that supports for multi-way-clustering{p_end}
49
+ {synopt : {opth res:iduals(newvar)}}save residuals; {it:predict, d} requires this option{p_end}
50
+ {synopt :{opth su:mmarize(tabstat##statname:stats)}}equivalent to the postestimation command {help reghdfe##postestimation:estat summarize},
51
+ but more flexible, faster, and saves results on {it:e(summarize)}{p_end}
52
+
53
+ {syntab:SE/Robust {help reghdfe##opt_vce:[+]}}
54
+ {p2coldent:+ {opt vce}{cmd:(}{help reghdfe##opt_vce:vcetype} [{cmd:,}{it:opt}]{cmd:)}}{it:vcetype}
55
+ may be {opt un:adjusted} (default), {opt r:obust} or {opt cl:uster} {help fvvarlist} (allowing two- and multi-way clustering){p_end}
56
+
57
+ {syntab:Diagnostic {help reghdfe##opt_diagnostic:[+]}}
58
+ {synopt :{opt v:erbose(#)}}amount of debugging information to show (0=None, 1=Some, 2=More, 3=Parsing/convergence details, 4=Every iteration){p_end}
59
+ {synopt :{opt time:it}}show elapsed times by stage of computation{p_end}
60
+
61
+ {syntab:Optimization {help reghdfe##opt_optimization:[+]}}
62
+ {p2coldent:+ {opth tol:erance(#)}}criterion for convergence (default=1e-8){p_end}
63
+ {synopt :{opth maxit:erations(#)}}maximum number of iterations (default=10,000); if set to missing ({cmd:.}) it will run for as long as it takes.{p_end}
64
+ {synopt :{opt accel:eration(str)}}acceleration method; options are conjugate_gradient (cg), steep_descent (sd), aitken (a),
65
+ {browse "http://web.stanford.edu/group/SOL/software/lsmr/":lsmr} (with diagonal preconditioner), and none (no){p_end}
66
+ {synopt :{opt transf:orm(str)}}transform operation that defines the type of alternating projection; options are Kaczmarz (kac), Cimmino (cim), Symmetric Kaczmarz (sym).
67
+ This is ignored with LSMR acceleration{p_end}
68
+ {synopt :{opt prune}}prune vertices of degree-1; acts as a preconditioner
69
+ that is useful if the underlying network is very sparse{p_end}
70
+ {synopt :{opt cond}}compute the finite condition number;
71
+ will only run successfully with few fixed effects
72
+ (because it computes the eigenvalues of the graph Laplacian){p_end}
73
+
74
+ {syntab:Memory Usage {help reghdfe##memory:[+]}}
75
+ {synopt :{opth pool:size(#)}}apply the within algorithm in groups of {it:#} variables (else, it will run on all variables at the same time).
76
+ A large pool size is usually faster but uses more memory{p_end}
77
+ {synopt :{opt compact}}preserve the dataset and drop variables as much as possible on every step{p_end}
78
+
79
+ {syntab:Speedup Tricks {help reghdfe##opt_speedup:[+]}}
80
+ {synopt :{opt nosamp:le}}will not create {it:e(sample)},
81
+ saving some space and speed{p_end}
82
+
83
+ {syntab:Degrees-of-Freedom Adjustments {help reghdfe##opt_dof:[+]}}
84
+ {synopt :{opt dof:adjustments(list)}}allows selecting the desired adjustments for degrees of freedom;
85
+ rarely used{p_end}
86
+ {synopt: {opth groupv:ar(newvar)}}unique identifier for the first mobility group{p_end}
87
+
88
+ {syntab:Reporting {help reghdfe##opt_reporting:[+]}}
89
+ {synopt :{opt version:}}reports the version number and date of reghdfe, and the list of required packages. standalone option{p_end}
90
+ {synopt :{opt l:evel(#)}}set confidence level; default is {cmd:level(95)}{p_end}
91
+ {synopt :{it:{help reghdfe##display_options:display_options}}}control column formats, row spacing, line width, display of omitted variables and base and empty cells, and factor-variable labeling.{p_end}
92
+ {synopt :}particularly useful are the {opt noomit:ted} and {opt noempty} options to hide regressors omitted due to collinearity{p_end}
93
+
94
+ {syntab:Undocumented}
95
+ {synopt :{opt keepsin:gletons}}do not drop singleton groups{p_end}
96
+ {synopt :{opt nocon:stant}}Do not report estimates for {it:_cons}{p_end}
97
+ {synopt :{opt old}}will call the latest 3.x version of reghdfe instead (see the {help reghdfe_old:old help file}){p_end}
98
+ {synopt :{opth rre(varname)}}where varname is the residual of a proven prev. regression of y against only the FEs{p_end}
99
+ {synopt :{opt check}}compile {it:lreghdfe.mlib} if it does not exist or if it needs to be updated;
100
+ use {cmd:reghdfe,compile} to force an update{p_end}
101
+ {synopt :{opt update}}update reghdfe and dependencies from the respective Github repositories;
102
+ use {cmd:reghdfe,reload} to do so from {it:c:\git\*}{p_end}
103
+ {synoptline}
104
+ {p2colreset}{...}
105
+ {p 4 6 2}* either {opt a:bsorb(absvars)} or {opt noa:bsorb} is required.{p_end}
106
+ {p 4 6 2}+ indicates a recommended or important option.{p_end}
107
+ {p 4 6 2}the regression variables may contain {help tsvarlist:time-series operators} and {help fvvarlist:factor variables};
108
+ the dependent variable cannot be of the form {it:i.turn}, but {it:42.turn} is allowed{p_end}
109
+ {p 4 6 2}{cmd:fweight}s, {cmd:aweight}s and {cmd:pweight}s are allowed; see {help weight}.{p_end}
110
+
111
+
112
+ {marker absvar}{...}
113
+ {title:Absvar Syntax}
114
+
115
+ {synoptset 22}{...}
116
+ {synopthdr:absvar}
117
+ {synoptline}
118
+ {synopt:{cmd:i.}{it:varname}}categorical variable to be absorbed (the {cmd:i.} prefix is tacit){p_end}
119
+ {synopt:{cmd:i.}{it:var1}{cmd:#i.}{it:var2}}absorb the interactions of multiple categorical variables{p_end}
120
+ {synopt:{cmd:i.}{it:var1}{cmd:#}{cmd:c.}{it:var2}}absorb heterogeneous slopes, where {it:var2} has a different slope coef. depending on the category of {it:var1}{p_end}
121
+ {synopt:{it:var1}{cmd:##}{cmd:c.}{it:var2}}equivalent to "{cmd:i.}{it:var1} {cmd:i.}{it:var1}{cmd:#}{cmd:c.}{it:var2}", but {it:much} faster{p_end}
122
+ {synopt:{it:var1}{cmd:##c.(}{it:var2 var3}{cmd:)}}multiple heterogeneous slopes are allowed together. Alternative syntax: {it:var1}{cmd:##(c.}{it:var2} {cmd:c.}{it:var3}{cmd:)}{p_end}
123
+ {synopt:{it:v1}{cmd:#}{it:v2}{cmd:#}{it:v3}{cmd:##c.(}{it:v4 v5}{cmd:)}}factor operators can be combined{p_end}
124
+ {synoptline}
125
+ {p2colreset}{...}
126
+ {p 4 6 2}To save the estimates specific absvars, write {newvar}{inp:={it:absvar}}.{p_end}
127
+ {p 4 6 2}Please be aware that in most cases these estimates are neither consistent nor econometrically identified.{p_end}
128
+ {p 4 6 2}Using categorical interactions (e.g. {it:x}{cmd:#}{it:z}) is faster than running {it:egen group(...)} beforehand.{p_end}
129
+ {p 4 6 2}Singleton obs. are dropped iteratively until no more singletons are found (see ancilliary article for details).{p_end}
130
+ {p 4 6 2}Slope-only absvars ("state#c.time") have poor numerical stability and slow convergence.
131
+ If you need those, either i) increase tolerance or
132
+ ii) use slope-and-intercept absvars ("state##c.time"), even if the intercept is redundant.
133
+ For instance if absvar is "i.zipcode i.state##c.time" then i.state is redundant given i.zipcode, but
134
+ convergence will still be {it:much} faster.{p_end}
135
+
136
+ {marker description}{...}
137
+ {title:Description}
138
+
139
+ {pstd}
140
+ {cmd:reghdfe} is a generalization of {help areg} (and {help xtreg:xtreg,fe}, {help xtivreg:xtivreg,fe}) for multiple levels of fixed effects
141
+ (including heterogeneous slopes), alternative estimators (2sls, gmm2s, liml), and additional robust standard errors (multi-way clustering, HAC standard errors, etc).{p_end}
142
+
143
+ {pstd}Additional features include:{p_end}
144
+
145
+ {p2col 8 12 12 2: a)}A novel and robust algorithm to efficiently absorb the fixed effects (extending the work of Guimaraes and Portugal, 2010).{p_end}
146
+ {p2col 8 12 12 2: b)}Coded in Mata, which in most scenarios makes it even faster than {it:areg} and {it:xtreg} for a single fixed effect (see benchmarks on the Github page).{p_end}
147
+ {p2col 8 12 12 2: c)}Can save the point estimates of the fixed effects ({it:caveat emptor}: the fixed effects may not be identified, see the {help reghdfe##references:references}).{p_end}
148
+ {p2col 8 12 12 2: d)}Calculates the degrees-of-freedom lost due to the fixed effects
149
+ (note: beyond two levels of fixed effects, this is still an open problem, but we provide a conservative approximation).{p_end}
150
+ {p2col 8 12 12 2: e)}Iteratively removes singleton groups by default, to avoid biasing the standard errors (see ancillary document).{p_end}
151
+
152
+ {pstd}
153
+ For a description of its internal Mata API, see {help reghdfe_mata}.
154
+
155
+ {marker options}{...}
156
+ {title:Options}
157
+
158
+ {marker opt_model}{...}
159
+ {dlgtab:Model and Miscellanea}
160
+
161
+ {phang}
162
+ {opth a:bsorb(reghdfe##absvar:absvars)} list of categorical variables (or interactions) representing the fixed effects to be absorbed.
163
+ this is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. {cmd:absorb()} is required.
164
+
165
+ {pmore}
166
+ To save a fixed effect, prefix the absvar with "{newvar}{cmd:=}".
167
+ For instance, the option {cmd:absorb(firm_id worker_id year_coefs=year_id)} will include firm,
168
+ worker and year fixed effects, but will only save the estimates for the year fixed effects (in the new variable {it:year_coefs}).
169
+
170
+ {pmore}
171
+ If you want to {help reghdfe##postestimation:predict} afterwards but don't care about setting the names of each fixed effect, use the {cmdab:save:fe} suboption.
172
+ This will delete all variables named {it:__hdfe*__} and create new ones as required.
173
+ Example: {it:reghdfe price weight, absorb(turn trunk, savefe)}
174
+
175
+ {phang}
176
+ {opth res:iduals(newvar)} will save the regression residuals in a new variable.
177
+
178
+ {pmore} {opt res:iduals} (without parenthesis) saves the residuals
179
+ in the variable {it:_reghdfe_resid}.
180
+
181
+ {pmore}
182
+ This option does not require additional computations, and is required for
183
+ subsequent calls to {cmd:predict, d}.
184
+
185
+ {phang}
186
+ {opth su:mmarize(tabstat##statname:stats)} will report and save a table of summary of statistics of the regression
187
+ variables (including the instruments, if applicable), using the same sample as the regression.
188
+
189
+ {pmore} {opt su:mmarize} (without parenthesis) saves the default set of statistics: {it:mean min max}.
190
+
191
+ {pmore} The complete list of accepted statistics is available in the {help tabstat##statname:tabstat help}. The most useful are {it:count range sd median p##}.
192
+
193
+ {pmore} The summary table is saved in {it:e(summarize)}
194
+
195
+ {pmore} To save the summary table silently (without showing it after the regression table), use the {opt qui:etly} suboption. You can use it by itself ({cmd:summarize(,quietly)}) or with custom statistics ({cmd:summarize(mean, quietly)}).
196
+
197
+ {phang}
198
+ {opt subopt:ions(...)}
199
+ options that will be passed directly to the regression command (either {help regress}, {help ivreg2}, or {help ivregress})
200
+
201
+ {marker opt_vce}{...}
202
+ {dlgtab:SE/Robust}
203
+
204
+ {phang}
205
+ {opth vce:(reghdfe##vcetype:vcetype, subopt)}
206
+ specifies the type of standard error reported.
207
+ Note that all the advanced estimators rely on asymptotic theory, and will likely have poor performance with small samples
208
+ (but again if you are using reghdfe, that is probably not your case)
209
+
210
+ {pmore}
211
+ {opt un:adjusted}/{opt ols:} estimates conventional standard errors, valid even in small samples
212
+ under the assumptions of homoscedasticity and no correlation between observations
213
+
214
+ {pmore}
215
+ {opt r:obust} estimates heteroscedasticity-consistent standard errors (Huber/White/sandwich estimators), but still assuming independence between observations
216
+
217
+ {pmore}Warning: in a FE panel regression, using {opt r:obust} will
218
+ lead to inconsistent standard errors if for every fixed effect, the {it:other} dimension is fixed.
219
+ For instance, in an standard panel with individual and time fixed effects, we require both the number of
220
+ individuals and time periods to grow asymptotically.
221
+ If that is not the case, an alternative may be to use clustered errors,
222
+ which as discussed below will still have their own asymptotic requirements.
223
+ For a discussion, see
224
+ {browse "http://www.princeton.edu/~mwatson/papers/ecta6489.pdf":Stock and Watson, "Heteroskedasticity-robust standard errors for fixed-effects panel-data regression," Econometrica 76 (2008): 155-174}
225
+
226
+ {pmore}
227
+ {opt cl:uster} {it:clustervars} estimates consistent standard errors even when the observations
228
+ are correlated within groups.
229
+
230
+ {pmore}
231
+ Multi-way-clustering is allowed. Thus, you can indicate as many {it:clustervar}s as desired
232
+ (e.g. allowing for intragroup correlation across individuals, time, country, etc).
233
+
234
+ {pmore}
235
+ Each {it:clustervar} permits interactions of the type {it:var1{cmd:#}var2}
236
+ (this is faster than using {cmd:egen group()} for a one-off regression).
237
+
238
+ {pmore} Warning: The number of clusters, for all of the cluster variables, must go off to infinity.
239
+ A frequent rule of thumb is that each cluster variable must have at least 50 different categories
240
+ (the number of categories for each clustervar appears on the header of the regression table).
241
+
242
+ {pstd}
243
+ The following suboptions require either the {help ivreg2} or the {help avar} package from SSC.
244
+ For a careful explanation, see the {help ivreg2##s_robust:ivreg2 help file}, from which the comments below borrow.
245
+
246
+ {pmore}
247
+ {opt u:nadjusted}{cmd:, }{opt bw(#)} (or just {cmd:, }{opt bw(#)}) estimates autocorrelation-consistent standard errors (Newey-West).
248
+
249
+ {pmore}
250
+ {opt r:obust}{cmd:, }{opt bw(#)} estimates autocorrelation-and-heteroscedasticity consistent standard errors (HAC).
251
+
252
+ {pmore}
253
+ {opt cl:uster} {it:clustervars}{cmd:, }{opt bw(#)} estimates standard errors consistent to common autocorrelated disturbances (Driscoll-Kraay). At most two cluster variables can be used in this case.
254
+
255
+ {pmore}
256
+ {cmd:, }{opt kiefer} estimates standard errors consistent under arbitrary intra-group autocorrelation (but not heteroskedasticity) (Kiefer).
257
+
258
+ {pmore}
259
+ {opt kernel(str)} is allowed in all the cases that allow {opt bw(#)}
260
+ The default kernel is {it:bar} (Bartlett). Valid kernels are Bartlett (bar); Truncated (tru); Parzen (par);
261
+ Tukey-Hanning (thann); Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs).
262
+
263
+ {pstd}
264
+ Advanced suboptions:
265
+
266
+ {pmore}
267
+ {cmd:, }{opt suite(default|mwc|avar)} overrides the package chosen by reghdfe to estimate the VCE.
268
+ {it:default} uses the default Stata computation (allows unadjusted, robust, and at most one cluster variable).
269
+ {it:mwc} allows multi-way-clustering (any number of cluster variables), but without the {it:bw} and {it:kernel} suboptions.
270
+ {it:avar} uses the avar package from SSC. Is the same package used by ivreg2, and allows the {it:bw}, {it:kernel}, {it:dkraay} and {it:kiefer} suboptions.
271
+ This is useful almost exclusively for debugging.
272
+
273
+ {pmore}
274
+ {cmd:, }{opt twice:robust} will compute robust standard errors not only on the first but on the second step of the gmm2s estimation. Requires {opt ivsuite(ivregress)}, but will not give the exact same results as ivregress.
275
+
276
+ {pmore}{it:Explanation:} When running instrumental-variable regressions with the {cmd:ivregress} package,
277
+ robust standard errors, and a gmm2s estimator, reghdfe will translate
278
+ {opt vce(robust)} into {opt wmatrix(robust)} {opt vce(unadjusted)}.
279
+ This maintains compatibility with {cmd:ivreg2} and other packages, but may unadvisable as described in {help ivregress} (technical note). Specifying this option will instead use {opt wmatrix(robust)} {opt vce(robust)}.
280
+
281
+ {pmore}However, computing the second-step vce matrix requires computing updated estimates (including updated fixed effects).
282
+ Since reghdfe currently does not allow this, the resulting standard errors
283
+ {hi:will not be exactly the same as with ivregress}.
284
+ This issue is similar to applying the CUE estimator, described further below.
285
+
286
+ {pmore}Note: The above comments are also appliable to clustered standard error.
287
+
288
+ {marker opt_iv}{...}
289
+ {dlgtab:IV/2SLS/GMM}
290
+
291
+ {phang}
292
+ The IV functionality of {cmd:reghdfe} has been moved into {ivreghdfe}.
293
+
294
+ {marker opt_diagnostic}{...}
295
+ {dlgtab:Diagnostic}
296
+
297
+ {phang}
298
+ {opt v:erbose(#)} orders the command to print debugging information.
299
+
300
+ {pmore}
301
+ Possible values are 0 (none), 1 (some information), 2 (even more), 3 (adds dots for each iteration, and reportes parsing details), 4 (adds details for every iteration step)
302
+
303
+ {pmore}
304
+ For debugging, the most useful value is 3. For simple status reports, set verbose to 1.
305
+
306
+ {phang}
307
+ {opt time:it} shows the elapsed time at different steps of the estimation. Most time is usually spent on three steps: map_precompute(), map_solve() and the regression step.
308
+
309
+ {marker opt_dof}{...}
310
+ {dlgtab:Degrees-of-Freedom Adjustments}
311
+
312
+ {phang}
313
+ {opt dof:adjustments(doflist)} selects how the degrees-of-freedom, as well as e(df_a), are adjusted due to the absorbed fixed effects.
314
+
315
+ {pmore}
316
+ Without any adjustment, we would assume that the degrees-of-freedom used by the fixed effects is equal to the count of all the fixed effects
317
+ (e.g. number of individuals + number of years in a typical panel).
318
+ However, in complex setups (e.g. fixed effects by individual, firm, job position, and year),
319
+ there may be a huge number of fixed effects collinear with each other, so we want to adjust for that.
320
+
321
+ {pmore}
322
+ Note: changing the default option is rarely needed, except in benchmarks, and to obtain a marginal speed-up by excluding the {opt pair:wise} option.
323
+
324
+ {pmore}
325
+ {opt all} is the default and almost always the best alternative. It is equivalent to {opt dof(pairwise clusters continuous)}
326
+
327
+ {pmore}
328
+ {opt none} assumes no collinearity across the fixed effects (i.e. no redundant fixed effects). This is overtly conservative, although it is the faster method by virtue of not doing anything.
329
+
330
+ {pmore}
331
+ {opt first:pair} will exactly identify the number of collinear fixed effects across the first two sets of fixed effects
332
+ (i.e. the first absvar and the second absvar).
333
+ The algorithm used for this is described in Abowd et al (1999), and relies on results from graph theory
334
+ (finding the number of connected sub-graphs in a bipartite graph).
335
+ It will not do anything for the third and subsequent sets of fixed effects.
336
+
337
+ {pmore}
338
+ For more than two sets of fixed effects, there are no known results that provide exact degrees-of-freedom as in the case above.
339
+ One solution is to ignore subsequent fixed effects (and thus oversestimate e(df_a) and understimate the degrees-of-freedom).
340
+ Another solution, described below, applies the algorithm between pairs of fixed effects to obtain a better (but not exact) estimate:
341
+
342
+ {pmore}
343
+ {opt pair:wise} applies the aforementioned connected-subgraphs algorithm between pairs of fixed effects.
344
+ For instance, if there are four sets of FEs, the first dimension will usually have no redundant coefficients (i.e. e(M1)==1), since we are running the model without a constant.
345
+ For the second FE, the number of connected subgraphs with respect to the first FE will provide an exact estimate of the degrees-of-freedom lost, e(M2).
346
+
347
+ {pmore}
348
+ For the third FE, we do not know exactly.
349
+ However, we can compute the number of connected subgraphs between the first and third {it:G(1,3)},
350
+ and second and third {it:G(2,3)} fixed effects, and choose the higher of those as the closest estimate for e(M3).
351
+ For the fourth FE, we compute {it:G(1,4)}, {it:G(2,4)} and {it:G(3,4)} and again choose the highest for e(M4).
352
+
353
+ {pmore}
354
+ Finally, we compute e(df_a) = e(K1) - e(M1) + e(K2) - e(M2) + e(K3) - e(M3) + e(K4) - e(M4);
355
+ where e(K#) is the number of levels or dimensions for the #-th fixed effect (e.g. number of individuals or years).
356
+ Note that e(M3) and e(M4) are only conservative estimates and thus we will usually be overestimating the standard errors. However, given the sizes of the datasets typically used with reghdfe, the difference should be small.
357
+
358
+ {pmore}
359
+ Since the gain from {opt pair:wise} is usually {it:minuscule} for large datasets, and the computation is expensive, it may be a good practice to exclude this option for speedups.
360
+
361
+ {pmore}
362
+ {opt cl:usters}
363
+ will check if a fixed effect is nested within a {it:clustervar}.
364
+ In that case, it will set e(K#)==e(M#) and no degrees-of-freedom will be lost due to this fixed effect.
365
+ The rationale is that we are already assuming that the number of effective observations is the number of cluster levels.
366
+ This is the same adjustment that {cmd:xtreg, fe} does, but {cmd:areg} does not use it.
367
+
368
+ {pmore}
369
+ {opt cont:inuous}
370
+ Fixed effects with continuous interactions (i.e. individual slopes, instead of individual intercepts) are dealt with differently.
371
+ In an i.categorical#c.continuous interaction, we will do one check: we count the number of categories where c.continuous is always zero.
372
+ In an i.categorical##c.continuous interaction, we do the above check but replace zero for any particular constant.
373
+ In the case where continuous is constant for a level of categorical, we know it is collinear with the intercept, so we adjust for it.
374
+
375
+ {pmore}
376
+ Additional methods, such as {opt bootstrap} are also possible but not yet implemented.
377
+ Some preliminary simulations done by the author showed a very poor convergence of this method.
378
+
379
+ {phang}
380
+ {opth groupv:ar(newvar)} name of the new variable that will contain the first mobility group.
381
+ Requires {opt pair:wise}, {opt first:pair}, or the default {opt all}.
382
+
383
+ {marker opt_speedup}{...}
384
+ {dlgtab:Speeding Up Estimation}
385
+
386
+ {phang}
387
+ {opt nosample} avoids saving {it:e(sample)} into the regression.
388
+ Since saving the variable only involves copying a Mata vector, the speedup is currently quite small.
389
+ Future versions of reghdfe may change this as features are added.
390
+
391
+ {pmore}
392
+ Note that {opt nosample} will be disabled when adding variables to the dataset (i.e. when saving residuals, fixed effects, or mobility groups), and is incompatible with most postestimation commands.
393
+
394
+ {pmore}
395
+ If you wish to use {opt nosample} while reporting {cmd:estat summarize}, see the {opt summarize} option.
396
+
397
+ {marker opt_optimization}{...}
398
+ {dlgtab:Optimization}
399
+
400
+ {phang}
401
+ {opth tol:erance(#)} specifies the tolerance criterion for convergence; default is {cmd:tolerance(1e-8)}
402
+
403
+ {pmore}
404
+ Note that for tolerances beyond 1e-14, the limits of the {it:double} precision are reached and the results will most likely not converge.
405
+
406
+ {pmore}
407
+ At the other end, is not tight enough, the regression may not identify perfectly collinear regressors. However, those cases can be easily spotted due to their extremely high standard errors.
408
+
409
+ {pmore}
410
+ Warning: when absorbing heterogeneous slopes without the accompanying heterogeneous intercepts, convergence is quite poor and a tight tolerance is strongly suggested (i.e. higher than the default). In other words, an absvar of {it:var1##c.var2} converges easily, but an absvar of {it:var1#c.var2} will converge slowly and may require a tighter tolerance.
411
+
412
+ {phang}
413
+ {opth maxit:erations(#)}
414
+ specifies the maximum number of iterations; the default is {cmd:maxiterations(10000)}; set it to missing ({cmd:.}) to run forever until convergence.
415
+
416
+ {phang}
417
+ {opth pool:size(#)}
418
+ Number of variables that are {it:pooled together} into a matrix that will then be transformed.
419
+ The default is to pool variables in groups of 5. Larger groups are faster with more than one processor, but may cause out-of-memory errors. In that case, set poolsize to 1.
420
+
421
+ {phang}
422
+ {it:Advanced options:}
423
+
424
+ {phang}
425
+ {opt acceleration(str)} allows for different acceleration techniques, from the simplest case of
426
+ no acceleration ({opt no:ne}), to steep descent ({opt st:eep_descent} or {opt sd}), Aitken ({opt a:itken}),
427
+ and finally Conjugate Gradient ({opt co:njugate_gradient} or {opt cg}).
428
+
429
+ {pmore}
430
+ Note: Each acceleration is just a plug-in Mata function, so a larger number of acceleration techniques are available, albeit undocumented (and slower).
431
+
432
+ {phang}
433
+ {opt transf:orm(str)} allows for different "alternating projection" transforms. The classical transform is Kaczmarz ({opt kac:zmarz}), and more stable alternatives are Cimmino ({opt cim:mino}) and Symmetric Kaczmarz ({opt sym:metric_kaczmarz})
434
+
435
+ {pmore}
436
+ Note: Each transform is just a plug-in Mata function, so a larger number of acceleration techniques are available, albeit undocumented (and slower).
437
+
438
+ {pmore}
439
+ Note: The default acceleration is Conjugate Gradient and the default transform is Symmetric Kaczmarz. Be wary that different accelerations often work better with certain transforms. For instance, do not use conjugate gradient with plain Kaczmarz, as it will not converge.
440
+
441
+ {phang}
442
+ {opt precondition} {it:(currently disabled)}
443
+
444
+ {marker opt_reporting}{...}
445
+ {dlgtab:Reporting}
446
+
447
+ {phang}
448
+ {opt l:evel(#)} sets confidence level; default is {cmd:level(95)}
449
+
450
+ {marker display_options}{...}
451
+ {phang}
452
+ {it:display_options}:
453
+ {opt noomit:ted},
454
+ {opt vsquish},
455
+ {opt noempty:cells},
456
+ {opt base:levels},
457
+ {opt allbase:levels},
458
+ {opt nofvlabel},
459
+ {opt fvwrap(#)},
460
+ {opt fvwrapon(style)},
461
+ {opth cformat(%fmt)},
462
+ {opt pformat(%fmt)},
463
+ {opt sformat(%fmt)}, and
464
+ {opt nolstretch};
465
+ see {helpb estimation options##display_options:[R] estimation options}.
466
+ {p_end}
467
+
468
+
469
+ {marker postestimation}{...}
470
+ {title:Postestimation Syntax}
471
+
472
+ Only {cmd:estat summarize}, {cmd:predict} and {cmd:test} are currently supported and tested.
473
+
474
+ {p 8 13 2}
475
+ {cmd:estat summarize}
476
+ {p_end}{col 23}Summarizes {it:depvar} and the variables described in {it:_b} (i.e. not the excluded instruments)
477
+
478
+ {p 8 16 2}
479
+ {cmd:predict}
480
+ {newvar}
481
+ {ifin}
482
+ [{cmd:,} {it:statistic}]
483
+ {p_end}{col 23}May require you to previously save the fixed effects (except for option {opt xb}).
484
+ {col 23}To see how, see the details of the {help reghdfe##absvar:absorb} option
485
+ {col 23}Equation: y = xb + d_absorbvars + e
486
+
487
+ {synoptset 20 tabbed}{...}
488
+ {synopthdr:statistic}
489
+ {synoptline}
490
+ {syntab :Main}
491
+ {p2coldent: {opt xb}}xb fitted values; the default{p_end}
492
+ {p2coldent: {opt xbd}}xb + d_absorbvars{p_end}
493
+ {p2coldent: {opt d}}d_absorbvars{p_end}
494
+ {p2coldent: {opt r:esiduals}}residual{p_end}
495
+ {p2coldent: {opt sc:ore}}score; equivalent to {opt residuals}{p_end}
496
+ {p2coldent: {opt stdp}}standard error of the prediction (of the xb component){p_end}
497
+ {synoptline}
498
+ {p2colreset}{...}
499
+ {p 4 6 2}although {cmd:predict} {help data_types:type} {help newvar} is allowed,
500
+ the resulting variable will always be of type {it:double}.{p_end}
501
+
502
+
503
+ {col 8}{cmd:test}{col 23}Performs significance test on the parameters, see the {help test:stata help}
504
+
505
+ {col 8}{cmd:suest}{col 23}Do not use {cmd:suest}. It will run, but the results will be incorrect. See workaround below
506
+
507
+ {pmore}If you want to perform tests that are usually run with {cmd:suest},
508
+ such as non-nested models, tests using alternative specifications of the variables,
509
+ or tests on different groups, you can replicate it manually, as described
510
+ {browse "http://www.stata.com/statalist/archive/2009-11/msg01485.html":here}.
511
+ {p_end}
512
+
513
+ {marker remarks}{...}
514
+
515
+ {title:Possible Pitfalls and Common Mistakes}
516
+
517
+ {p2col 8 12 12 2: 1.}Ignore the constant; it doesn't tell you much. If you want to use descriptive stats, that's what the {opt sum:marize()} and {cmd:estat summ} commands are for.
518
+ Even better, use {opt noconstant} to hide it{p_end}
519
+ {p2col 8 12 12 2: 2.}Think twice before saving the fixed effects. They are probably inconsistent / not identified and you will likely be using them wrong.{p_end}
520
+ {p2col 8 12 12 2: 3.}(note: as of version 3.0 singletons are dropped by default) It's good practice to drop singletons. {opt dropsi:ngleton} is your friend.{p_end}
521
+ {p2col 8 12 12 2: 4.}If you use {opt vce(robust)}, be sure that your {it:other} dimension is not "fixed" but grows with N, or your SEs will be wrong.{p_end}
522
+ {p2col 8 12 12 2: 5.}If you use {opt vce(cluster ...)}, check that your number of clusters is high enough (50+ is a rule of thumb). If not, you are making the SEs even worse!{p_end}
523
+ {p2col 8 12 12 2: 6.}The panel variables (absvars) should probably be nested within the clusters (clustervars) due to the within-panel correlation induced by the FEs.
524
+ (this is not the case for *all* the absvars, only those that are treated as growing as N grows){p_end}
525
+ {p2col 8 12 12 2: 7.}If you run analytic or probability weights,
526
+ you are responsible for ensuring that the weights stay
527
+ constant within each unit of a fixed effect (e.g. individual),
528
+ or that it is correct to allow varying-weights for that case.
529
+ {p_end}
530
+ {p2col 8 12 12 2: 8.}Be aware that adding several HDFEs is not a panacea.
531
+ The first limitation is that it only uses within variation (more than acceptable if you have a large enough dataset).
532
+ The second and subtler limitation occurs if the fixed effects are themselves outcomes of the variable of interest (as crazy as it sounds).
533
+ For instance, imagine a regression where we study the effect of past corporate fraud on future firm performance.
534
+ We add firm, CEO and time fixed-effects (standard practice). This introduces a serious flaw: whenever a fraud event is discovered,
535
+ i) future firm performance will suffer, and ii) a CEO turnover will likely occur.
536
+ Moreover, after fraud events, the new CEOs are usually specialized in dealing with the aftershocks of such events
537
+ (and are usually accountants or lawyers).
538
+ The fixed effects of these CEOs will also tend to be quite low, as they tend to manage firms with very risky outcomes.
539
+ Therefore, the regressor (fraud) affects the fixed effect (identity of the incoming CEO).
540
+ Adding particularly low CEO fixed effects will then overstate the performance of the firm,
541
+ and thus {it:understate} the negative effects of fraud on future firm performance.{p_end}
542
+
543
+ {title:Missing Features}
544
+
545
+ {phang}(If you are interested in discussing these or others, feel free to {help reghdfe##contact:contact me})
546
+
547
+ {phang}Code, medium term:
548
+
549
+ {p2col 8 12 12 2: -}Complete GT preconditioning (v4){p_end}
550
+ {p2col 8 12 12 2: -}Improve algorithm that recovers the fixed effects (v5){p_end}
551
+ {p2col 8 12 12 2: -}Improve statistics and tests related to the fixed effects (v5){p_end}
552
+ {p2col 8 12 12 2: -}Implement a -bootstrap- option in DoF estimation (v5){p_end}
553
+
554
+ {phang}Code, long term:
555
+
556
+ {p2col 8 12 12 2: -}The interaction with cont vars (i.a#c.b) may suffer from numerical accuracy issues, as we are dividing by a sum of squares{p_end}
557
+ {p2col 8 12 12 2: -}Calculate exact DoF adjustment for 3+ HDFEs (note: not a problem with cluster VCE when one FE is nested within the cluster){p_end}
558
+ {p2col 8 12 12 2: -}More postestimation commands (lincom? margins?){p_end}
559
+
560
+ {phang}Theory:
561
+
562
+ {p2col 8 12 12 2: -}Add a more thorough discussion on the possible identification issues{p_end}
563
+ {p2col 8 12 12 2: -}Find out a way to use reghdfe iteratively with CUE
564
+ (right now only OLS/2SLS/GMM2S/LIML give the exact same results){p_end}
565
+ {p2col 8 12 12 2: -}Not sure if I should add an F-test for the absvars in the vce(robust) and vce(cluster) cases.
566
+ Discussion on e.g. -areg- (methods and formulas) and textbooks suggests not;
567
+ on the other hand, there may be alternatives:
568
+ {it:{browse "http://www.socialsciences.manchester.ac.uk/disciplines/economics/research/discussionpapers/pdf/EDP-1124.pdf" :A Heteroskedasticity-Robust F-Test Statistic for Individual Effects}}{p_end}
569
+
570
+ {marker examples}{...}
571
+ {title:Examples}
572
+
573
+ {hline}
574
+ {pstd}Setup{p_end}
575
+ {phang2}{cmd:. sysuse auto}{p_end}
576
+
577
+ {pstd}Simple case - one fixed effect{p_end}
578
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78)}{p_end}
579
+ {hline}
580
+
581
+ {pstd}As above, but also compute clustered standard errors{p_end}
582
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78) vce(cluster rep78)}{p_end}
583
+ {hline}
584
+
585
+ {pstd}Two and three sets of fixed effects{p_end}
586
+ {phang2}{cmd:. webuse nlswork}{p_end}
587
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year)}{p_end}
588
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year occ)}{p_end}
589
+ {hline}
590
+
591
+ {title:Advanced examples}
592
+
593
+ {pstd}Save the FEs as variables{p_end}
594
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(FE1=idcode FE2=year)}{p_end}
595
+
596
+ {pstd}Save first mobility group{p_end}
597
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa , absorb(idcode occ) groupv(mobility_occ)}{p_end}
598
+
599
+ {pstd}Factor interactions in the independent variables{p_end}
600
+ {phang2}{cmd:. reghdfe ln_w i.grade#i.age ttl_exp tenure not_smsa , absorb(idcode occ)}{p_end}
601
+
602
+ {pstd}Interactions in the absorbed variables (notice that only the {it:#} symbol is allowed){p_end}
603
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa , absorb(idcode#occ)}{p_end}
604
+
605
+ {pstd}Factorial interactions{p_end}
606
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78 turn##c.price)}{p_end}
607
+
608
+ {pstd}IV regression (this does NOT work anymore; please use the ivreghdfe package instead){p_end}
609
+ {phang2}{cmd:. sysuse auto}{p_end}
610
+ {phang2}{cmd:. ivreghdfe price weight (length=head), absorb(rep78)}{p_end}
611
+ {phang2}{cmd:. ivreghdfe price weight (length=head), absorb(rep78, resid)}{p_end}
612
+
613
+
614
+ {marker results}{...}
615
+ {title:Stored results}
616
+
617
+ {pstd}
618
+ {cmd:reghdfe} stores the following in {cmd:e()}:
619
+
620
+ {pstd}
621
+ {it:Note: it also keeps most e() results placed by the regression subcommands (ivreg2, ivregress)}
622
+
623
+ {synoptset 24 tabbed}{...}
624
+ {syntab:Scalars}
625
+ {synopt:{cmd:e(N)}}number of observations{p_end}
626
+ {synopt:{cmd:e(num_singletons)}}number of singleton observations{p_end}
627
+ {synopt:{cmd:e(N_full)}}number of observations including singletons{p_end}
628
+
629
+ {synopt:{cmd:e(N_hdfe)}}number of absorbed fixed-effects{p_end}
630
+ {synopt:{cmd:e(tss)}}total sum of squares{p_end}
631
+ {synopt:{cmd:e(rss)}}residual sum of squares{p_end}
632
+ {synopt:{cmd:e(r2)}}R-squared{p_end}
633
+ {synopt:{cmd:e(r2_a)}}adjusted R-squared{p_end}
634
+ {synopt:{cmd:e(r2_within)}}Within R-squared{p_end}
635
+ {synopt:{cmd:e(r2_a_within)}}Adjusted Within R-squared{p_end}
636
+ {synopt:{cmd:e(df_a)}}degrees of freedom lost due to the fixed effects{p_end}
637
+ {synopt:{cmd:e(rmse)}}root mean squared error{p_end}
638
+ {synopt:{cmd:e(ll)}}log-likelihood{p_end}
639
+ {synopt:{cmd:e(ll_0)}}log-likelihood of fixed-effect-only regression{p_end}
640
+ {synopt:{cmd:e(F)}}F statistic{p_end}
641
+ {synopt:{cmd:e(F_absorb)}}F statistic for absorbed effect {it:note: currently disabled}{p_end}
642
+ {synopt:{cmd:e(rank)}}rank of {cmd:e(V)}{p_end}
643
+ {synopt:{cmd:e(N_clustervars)}}number of cluster variables{p_end}
644
+
645
+ {synopt:{cmd:e(clust}#{cmd:)}}number of clusters for the #th cluster variable{p_end}
646
+ {synopt:{cmd:e(N_clust)}}number of clusters; minimum of {it:e(clust#)}{p_end}
647
+
648
+ {synopt:{cmd:e(K}#{cmd:)}}Number of categories of the #th absorbed FE{p_end}
649
+ {synopt:{cmd:e(M}#{cmd:)}}Number of redundant categories of the #th absorbed FE{p_end}
650
+ {synopt:{cmd:e(mobility)}}Sum of all {cmd:e(M#)}{p_end}
651
+ {synopt:{cmd:e(df_m)}}model degrees of freedom{p_end}
652
+ {synopt:{cmd:e(df_r)}}residual degrees of freedom{p_end}
653
+
654
+ {synopt:{cmd:e(report_constant)}}whether _cons was included in the regressions (default)
655
+ or as part of the fixed effects{p_end}
656
+
657
+ {synoptset 24 tabbed}{...}
658
+ {syntab:Macros}
659
+ {synopt:{cmd:e(cmd)}}{cmd:reghdfe}{p_end}
660
+ {synopt:{cmd:e(subcmd)}}either {cmd:regress}, {cmd:ivreg2} or {cmd:ivregress}{p_end}
661
+ {synopt:{cmd:e(model)}}{cmd:ols}, {cmd:iv}, {cmd:gmm2s}, {cmd:liml} or {cmd:cue}{p_end}
662
+ {synopt:{cmd:e(cmdline)}}command as typed{p_end}
663
+ {synopt:{cmd:e(dofmethod)}}dofmethod employed in the regression{p_end}
664
+ {synopt:{cmd:e(depvar)}}name of dependent variable{p_end}
665
+ {synopt:{cmd:e(indepvars)}}names of independent variables{p_end}
666
+ {synopt:{cmd:e(absvars)}}name of the absorbed variables or interactions{p_end}
667
+ {synopt:{cmd:e(title)}}title in estimation output{p_end}
668
+ {synopt:{cmd:e(clustvar)}}name of cluster variable{p_end}
669
+ {synopt:{cmd:e(clustvar}#{cmd:)}}name of the #th cluster variable{p_end}
670
+ {synopt:{cmd:e(vce)}}{it:vcetype} specified in {cmd:vce()}{p_end}
671
+ {synopt:{cmd:e(vcetype)}}title used to label Std. Err.{p_end}
672
+ {synopt:{cmd:e(stage)}}stage within an IV-regression; only if {it:stages()} was used{p_end}
673
+ {synopt:{cmd:e(properties)}}{cmd:b V}{p_end}
674
+
675
+ {synoptset 24 tabbed}{...}
676
+ {syntab:Matrices}
677
+ {synopt:{cmd:e(b)}}coefficient vector{p_end}
678
+ {synopt:{cmd:e(V)}}variance-covariance matrix of the estimators{p_end}
679
+
680
+ {synoptset 24 tabbed}{...}
681
+ {syntab:Functions}
682
+ {synopt:{cmd:e(sample)}}marks estimation sample{p_end}
683
+ {p2colreset}{...}
684
+
685
+ {marker contact}{...}
686
+ {title:Author}
687
+
688
+ {pstd}Sergio Correia{break}
689
+ Board of Governors of the Federal Reserve{break}
690
+ Email: {browse "mailto:[email protected]":[email protected]}
691
+ {p_end}
692
+
693
+ {marker user_guide}{...}
694
+ {title:User Guide}
695
+
696
+ {pstd}
697
+ A copy of this help file, as well as a more in-depth user guide is in development and will be available at {browse "http://scorreia.com/reghdfe"}.{p_end}
698
+
699
+ {marker updates}{...}
700
+ {title:Latest Updates}
701
+
702
+ {pstd}
703
+ {cmd:reghdfe} is updated frequently, and upgrades or minor bug fixes may not be immediately available in SSC.
704
+ To check or contribute to the latest version of reghdfe, explore the
705
+ {browse "https://github.com/sergiocorreia/reghdfe":Github repository}.
706
+ Bugs or missing features can be discussed through email or at the {browse "https://github.com/sergiocorreia/reghdfe/issues":Github issue tracker}.{p_end}
707
+
708
+ {pstd}
709
+ To see your current version and installed dependencies, type {cmd:reghdfe, version}
710
+ {p_end}
711
+
712
+ {marker acknowledgements}{...}
713
+ {title:Acknowledgements}
714
+
715
+ {pstd}
716
+ This package wouldn't have existed without the invaluable feedback and contributions of Paulo Guimaraes, Amine Ouazad, Mark Schaffer and Kit Baum. Also invaluable are the great bug-spotting abilities of many users.{p_end}
717
+
718
+ {pstd}In addition, {it:reghdfe} is built upon important contributions from the Stata community:{p_end}
719
+
720
+ {phang}{browse "https://ideas.repec.org/c/boc/bocode/s457101.html":reg2hdfe}, from Paulo Guimaraes,
721
+ and {browse "https://ideas.repec.org/c/boc/bocode/s456942.html":a2reg} from Amine Ouazad,
722
+ were the inspiration and building blocks on which reghdfe was built.{p_end}
723
+
724
+ {phang}{browse "http://www.repec.org/bocode/i/ivreg2.html":ivreg2}, by Christopher F Baum, Mark E Schaffer and Steven Stillman, is the package used by default for instrumental-variable regression.{p_end}
725
+
726
+ {phang}{browse "https://ideas.repec.org/c/boc/bocode/s457689.html":avar} by Christopher F Baum and Mark E Schaffer, is the package used for estimating the HAC-robust standard errors of ols regressions.{p_end}
727
+
728
+ {phang}{browse "http://econpapers.repec.org/software/bocbocode/s456797.htm":tuples} by Joseph Lunchman and Nicholas Cox, is used when computing standard errors with multi-way clustering (two or more clustering variables).{p_end}
729
+
730
+ {marker references}{...}
731
+ {title:References}
732
+
733
+ {p 0 0 2}
734
+ The algorithm underlying reghdfe is a generalization of the works by:
735
+
736
+ {phang}
737
+ Paulo Guimaraes and Pedro Portugal. "A Simple Feasible Alternative Procedure to Estimate
738
+ Models with High-Dimensional Fixed Effects".
739
+ {it:Stata Journal, 10(4), 628-649, 2010.}
740
+ {browse "http://www.stata-journal.com/article.html?article=st0212":[link]}
741
+ {p_end}
742
+
743
+ {phang}
744
+ Simen Gaure. "OLS with Multiple High Dimensional Category Dummies".
745
+ {it:Memorandum 14/2010, Oslo University, Department of Economics, 2010.}
746
+ {browse "https://ideas.repec.org/p/hhs/osloec/2010_014.html":[link]}
747
+ {p_end}
748
+
749
+ {p 0 0 2}
750
+ It addresses many of the limitations of previous works, such as possible lack of convergence, arbitrarily slow convergence times,
751
+ and being limited to only two or three sets of fixed effects (for the first paper).
752
+ The paper explaining the specifics of the algorithm is a work-in-progress and available upon request.
753
+
754
+ {p 0 0 0}
755
+ If you use this program in your research, please cite either
756
+ the {browse "https://ideas.repec.org/c/boc/bocode/s457874.html":REPEC entry}
757
+ or the aforementioned papers.{p_end}
758
+
759
+ {title:Additional References}
760
+
761
+ {p 0 0 0}
762
+ For details on the Aitken acceleration technique employed, please see "method 3" as described by:
763
+
764
+ {phang}
765
+ Macleod, Allan J. "Acceleration of vector sequences by multi-dimensional Delta-2 methods."
766
+ {it:Communications in Applied Numerical Methods 2.4 (1986): 385-392.}
767
+ {p_end}
768
+
769
+ {p 0 0 0}
770
+ For the rationale behind interacting fixed effects with continuous variables, see:
771
+
772
+ {phang}
773
+ Duflo, Esther. "The medium run effects of educational expansion: Evidence from a large school construction program in Indonesia."
774
+ {it:Journal of Development Economics 74.1 (2004): 163-197.}{browse "http://www.sciencedirect.com/science/article/pii/S0304387803001846": [link]}
775
+ {p_end}
776
+
777
+ {p 0 0 0}
778
+ Also see:
779
+
780
+ {phang}Abowd, J. M., R. H. Creecy, and F. Kramarz 2002.
781
+ Computing person and firm effects using linked longitudinal employer-employee data.
782
+ {it:Census Bureau Technical Paper TP-2002-06.}
783
+ {p_end}
784
+
785
+ {phang}
786
+ Cameron, A. Colin & Gelbach, Jonah B. & Miller, Douglas L., 2011.
787
+ "Robust Inference With Multiway Clustering,"
788
+ {it:Journal of Business & Economic Statistics, American Statistical Association, vol. 29(2), pages 238-249.}
789
+ {p_end}
790
+
791
+ {phang}
792
+ Gormley, T. & Matsa, D. 2014.
793
+ "Common errors: How to (and not to) control for unobserved heterogeneity."
794
+ {it:The Review of Financial Studies, vol. 27(2), pages 617-661.}
795
+ {p_end}
796
+
797
+ {phang}
798
+ Mittag, N. 2012.
799
+ "New methods to estimate models with large sets of fixed effects with an application to matched employer-employee data from Germany."
800
+ {it:{browse "http://doku.iab.de/fdz/reporte/2012/MR_01-12_EN.pdf":FDZ-Methodenreport 02/2012}.}
801
+ {p_end}
30/replication_package/Adofiles/reghdfe_2019/reghdfe_accelerations.mata ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mata:
2
+
3
+ // --------------------------------------------------------------------------
4
+ // Acceleration Schemes
5
+ // --------------------------------------------------------------------------
6
+
7
// Benchmark scheme: plain alternating projections, demeaning over each fixed
// effect in turn (no transform pointer, no acceleration) until convergence
// or S.maxiter is reached. Returns the residual after partialling out the FEs.
`Variables' function accelerate_test(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it, g
	`Variables' r
	`Factor' f
	pragma unset r

	assert(S.converged == 0)

	for (it=1; it<=S.maxiter; it++) {
		// One full sweep: project out each FE sequentially (Kaczmarz-style)
		for (g=1; g<=S.G; g++) {
			f = S.factors[g]
			if (g==1) {
				r = y - panelmean(f.sort(y), f)[f.levels, .]
			}
			else {
				r = r - panelmean(f.sort(r), f)[f.levels, .]
			}
		}
		if (check_convergence(S, it, r, y)) break
		y = r
	}
	return(r)
}
26
+
27
+ // --------------------------------------------------------------------------
28
+
29
// No acceleration: repeatedly apply the alternating-projection transform T
// until the iterates stop changing (or S.maxiter is hit). Returns the residual.
`Variables' function accelerate_none(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it
	`Variables' r
	pragma unset r

	assert(S.converged == 0)

	for (it=1; it<=S.maxiter; it++) {
		(*T)(S, y, r) // Faster version of "r = S.T(y)"
		if (check_convergence(S, it, r, y)) break
		y = r
	}
	return(r)
}
43
+ // --------------------------------------------------------------------------
44
+
45
+ // Start w/out acceleration, then switch to CG
46
// Hybrid scheme: run a few plain (unaccelerated) sweeps to smooth the initial
// iterate, then switch to conjugate gradient with the symmetric Kaczmarz
// transform (the stable pairing for CG).
`Variables' function accelerate_hybrid(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' iter, accel_start
	`Variables' resid
	pragma unset resid

	accel_start = 6 // number of warm-up iterations before switching to CG
	assert(S.converged == 0)

	for (iter=1; iter<=accel_start; iter++) {
		(*T)(S, y, resid) // Faster version of "resid = S.T(y)"
		// Bugfix: if convergence happens during the warm-up sweeps, return the
		// residual here. Falling through (as the old `break` did) would call
		// accelerate_cg(), whose assert(S.converged == 0) would then abort.
		if (check_convergence(S, iter, resid, y)) return(resid)
		y = resid
	}

	T = &transform_sym_kaczmarz() // Override transform for the CG stage

	return(accelerate_cg(S, y, T))
}
64
+
65
+ // --------------------------------------------------------------------------
66
+ // Memory cost is approx = 4*size(y) (actually 3 since y is already there)
67
+ // But we need to add maybe 1 more due to u:*v
68
+ // And I also need to check how much does project and T use..
69
+ // Double check with a call to memory
70
+
71
+ // For discussion on the stopping criteria, see the following presentation:
72
+ // Arioli & Gratton, "Least-squares problems, normal equations, and stopping criteria for the conjugate gradient method". URL: https://www.stfc.ac.uk/SCD/resources/talks/Arioli-NAday2008.pdf
73
+
74
+ // Basically, we will use the Hestenes and Stiefel rule
75
+
76
// Conjugate gradient acceleration of the alternating projections.
// Memory cost is approx = 4*size(y) (3 extra since y is already there).
// Stopping rule: Hestenes & Stiefel; for a discussion see
// Arioli & Gratton, "Least-squares problems, normal equations, and stopping
// criteria for the conjugate gradient method"
// https://www.stfc.ac.uk/SCD/resources/talks/Arioli-NAday2008.pdf
`Variables' function accelerate_cg(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' iter, d, Q
	`Variables' r, u, v
	`RowVector' alpha, beta, ssr, ssr_old, improvement_potential
	`Matrix' recent_ssr
	pragma unset r
	pragma unset v

	assert(S.converged == 0)
	if (S.timeit) timer_on(70)
	Q = cols(y)

	// d = number of recent SSR values used by the convergence criterion
	// (lower = faster but riskier); the criterion is described in
	// http://scicomp.stackexchange.com/questions/582/stopping-criteria-for-iterative-linear-solvers-applied-to-nearly-singular-system/585#585
	d = 1 // BUGBUG Set it to 2/3?

	if (S.timeit) timer_on(73)
	improvement_potential = weighted_quadcolsum(S, y, y)
	recent_ssr = J(d, Q, .)
	if (S.timeit) timer_off(73)

	if (S.timeit) timer_on(71)
	(*T)(S, y, r, 1)
	if (S.timeit) timer_off(71)
	if (S.timeit) timer_on(73)
	ssr = weighted_quadcolsum(S, r, r) // cross(r,r) when cols(y)==1 // BUGBUG maybe diag(quadcross()) is faster?
	u = r
	if (S.timeit) timer_off(73)

	for (iter=1; iter<=S.maxiter; iter++) {
		if (S.timeit) timer_on(71)
		(*T)(S, u, v, 1) // This is the hottest loop in the entire program
		if (S.timeit) timer_off(71)
		if (S.timeit) timer_on(73)
		alpha = safe_divide( ssr , weighted_quadcolsum(S, u, v) )
		if (S.timeit) timer_off(73)
		if (S.timeit) timer_on(74)
		recent_ssr[1 + mod(iter-1, d), .] = alpha :* ssr
		improvement_potential = improvement_potential - alpha :* ssr
		y = y - alpha :* u
		if (S.timeit) timer_off(74)
		if (S.timeit) timer_on(75)
		if (S.compute_rre & !S.prune) reghdfe_rre_benchmark(y[., 1], S.rre_true_residual, S.rre_depvar_norm)
		r = r - alpha :* v
		ssr_old = ssr
		if (S.timeit) timer_off(75)
		if (S.timeit) timer_on(73)
		if (S.verbose>=5) r
		ssr = weighted_quadcolsum(S, r, r)
		beta = safe_divide( ssr , ssr_old) // Fletcher-Reeves formula, but it shouldn't matter in our problem
		if (S.timeit) timer_off(73)
		u = r + beta :* u
		// Converged when sum(recent_ssr) < tol^2 * improvement_potential
		if (S.timeit) timer_on(76)
		if ( check_convergence(S, iter, colsum(recent_ssr), improvement_potential, "hestenes") ) {
			// Bugfix: timer_off(76) used to sit *after* the break statement
			// (unreachable), so timer 76 was left running on convergence.
			if (S.timeit) timer_off(76)
			break
		}
		if (S.timeit) timer_off(76)
	}
	if (S.timeit) timer_off(70)
	return(y)
}
140
+
141
+ // --------------------------------------------------------------------------
142
+
143
// Steepest-descent acceleration: take an optimal step length along the
// projection direction each iteration; optionally accumulates the FE
// coefficients ("alphas") when they are being stored.
`Variables' function accelerate_sd(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it, g
	`Variables' p
	`RowVector' step
	pragma unset p

	assert(S.converged == 0)

	for (it=1; it<=S.maxiter; it++) {
		(*T)(S, y, p, 1)
		if (check_convergence(S, it, y-p, y)) break
		// Optimal step length: <y,p> / <p,p>
		step = safe_divide( weighted_quadcolsum(S, y, p) , weighted_quadcolsum(S, p, p) )
		if (uniform(1,1)<0.1) step = 1 // BUGBUG: Does this REALLY help to randomly unstuck an iteration?

		y = y - step :* p
		if (S.compute_rre & !S.prune) reghdfe_rre_benchmark(y[., 1], S.rre_true_residual, S.rre_depvar_norm)

		if (S.storing_alphas) {
			// Fold the step-scaled temporary alphas into the running totals,
			// but only for the FEs whose coefficients were requested
			for (g=1; g<=S.G; g++) {
				if (S.save_fe[g]) {
					asarray(S.factors[g].extra, "alphas",
						asarray(S.factors[g].extra, "alphas") +
						step :* asarray(S.factors[g].extra, "tmp_alphas")
					)
				}
			}
		}
	}
	return(y-p)
}
175
+
176
+ // --------------------------------------------------------------------------
177
+ // This is method 3 of Macleod (1986), a vector generalization of the Aitken-Steffensen method
178
+ // Also: "when numerically computing the sequence.. stop.. when rounding errors become too
179
+ // important in the denominator, where the ^2 operation may cancel too many significant digits"
180
+ // Note: Sometimes the iteration gets "stuck"; can we unstuck it with adding randomness
181
+ // in the accelerate decision? There should be a better way.. (maybe symmetric kacz instead of standard one?)
182
+
183
// Aitken acceleration: method 3 of Macleod (1986), a vector generalization of
// the Aitken-Steffensen ("delta squared") method.
// "when numerically computing the sequence.. stop.. when rounding errors become
// too important in the denominator, where the ^2 operation may cancel too many
// significant digits"
// Note: Sometimes the iteration gets "stuck"; can we unstuck it with adding
// randomness in the accelerate decision? There should be a better way..
// (maybe symmetric kacz instead of standard one?)
`Variables' function accelerate_aitken(`FixedEffects' S, `Variables' y, `FunctionP' T) {
	`Integer' it
	`Variables' r, prev, d2
	`Boolean' do_accel
	`RowVector' t
	pragma unset r

	assert(S.converged == 0)
	prev = J(rows(y), cols(y), .)

	for (it=1; it<=S.maxiter; it++) {

		(*T)(S, y, r)
		do_accel = it>=S.accel_start & !mod(it,S.accel_freq)

		// Accelerate: extrapolate with t = (d'd2) / (d2'd2)
		if (do_accel) {
			d2 = r - 2 * y + prev // = (r - y) - (y - prev) // Equivalent to D2.resid
			t = safe_divide( weighted_quadcolsum(S, (r - y) , d2) , weighted_quadcolsum(S, d2 , d2) )
			r = r - t :* (r - y)
		}

		// Only check convergence on non-accelerated iterations
		// BUGBUG: Do we need to disable the check when accelerating?
		// if (check_convergence(S, it, do_accel? r :* . : r, y)) break
		if (S.compute_rre & !S.prune) reghdfe_rre_benchmark(r[., 1], S.rre_true_residual, S.rre_depvar_norm)
		if (check_convergence(S, it, r, y)) break
		prev = y // prev holds resid[iter-2]
		y = r // y holds resid[iter-1]
	}
	return(r)
}
217
+
218
+ // --------------------------------------------------------------------------
219
+
220
// Decide whether the fixed-point iteration has converged, update the solver
// state (S.converged, S.iteration_count, S.accuracy), and handle all progress
// reporting. Two criteria: "vectors" (relative change between iterates) and
// "hestenes" (SSR-based rule used by conjugate gradient). Aborts with
// error 430 if S.maxiter is exhausted and S.abort is set.
`Boolean' check_convergence(`FixedEffects' S, `Integer' iter, `Variables' y_new, `Variables' y_old,| `String' method) {
	`Boolean' last_iter
	`Real' err
	`Real' eps_threshold

	if (args()<5) method = "vectors"

	if (S.G==1 & !S.storing_alphas) {
		// Shortcut for the trivial case (1 FE): one sweep fully demeans
		err = 0
	}
	else if (method=="vectors") {
		// max() ensures that the result when bunching vars is at least as good as when not bunching
		err = max(mean(reldif(y_new, y_old), S.weight))
	}
	else if (method=="hestenes") {
		// A regressor perfectly explained by the absvars can leave SSR very
		// close to zero but negative (so its sqrt would be missing)
		eps_threshold = 1e-15 // 10 * epsilon(1) ; perhaps too aggressive and should be 1e-14 ?
		if (S.verbose > 0 & all(y_new :< eps_threshold)) {
			printf("{txt} note: eps. is very close to zero (%g), so hestenes assumed convergence to avoid numerical precision errors\n", min(y_new))
		}
		err = safe_divide(edittozerotol(y_new, eps_threshold ),
		                  editmissing(y_old, epsilon(1)),
		                  epsilon(1) )
		err = sqrt(max(err))
	}
	else {
		exit(error(100))
	}

	assert_msg(!missing(err), "update error is missing")

	S.converged = S.converged + (err <= S.tolerance)
	last_iter = iter==S.maxiter

	if (S.converged >= S.min_ok) {
		S.iteration_count = max((iter, S.iteration_count))
		S.accuracy = max((S.accuracy, err))
		if (S.verbose==1) printf("{txt} converged in %g iterations (last error =%3.1e)\n", iter, err)
		if (S.verbose>1) printf("\n{txt} - Converged in %g iterations (last error =%3.1e)\n", iter, err)
	}
	else if (last_iter & S.abort) {
		printf("\n{err}convergence not achieved in %g iterations (last error=%e); try increasing maxiter() or decreasing tol().\n", S.maxiter, err)
		exit(430)
	}
	else {
		// Progress dots / periodic ratio of error to tolerance
		if ((S.verbose>=2 & S.verbose<=3 & mod(iter,1)==0) | (S.verbose==1 & mod(iter,1)==0)) {
			printf("{res}.{txt}")
			displayflush()
		}
		if ( (S.verbose>=2 & S.verbose<=3 & mod(iter,100)==0) | (S.verbose==1 & mod(iter,100)==0) ) {
			printf("{txt}%9.1f\n ", err/S.tolerance)
		}

		if (S.verbose==4 & method!="hestenes") printf("{txt} iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e\n", iter, err)
		if (S.verbose==4 & method=="hestenes") printf("{txt} iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e {txt}norm(ssr)={res}%g\n", iter, err, norm(y_new))

		if (S.verbose>=5) {
			printf("\n{txt} iter={res}%4.0f{txt}\tupdate_error={res}%-9.6e{txt}\tmethod={res}%s\n", iter, err, method)
			"old:"
			y_old
			"new:"
			y_new
		}
	}
	return(S.converged >= S.min_ok)
}
289
+
290
+ // --------------------------------------------------------------------------
291
+
292
// Column-wise (optionally weighted) cross products: returns a row vector with
// sum(w :* x[.,j] :* y[.,j]) for each column j. The branch on cols(x) picks
// whichever formulation is faster for thin vs. wide matrices.
// BUGBUG: override S.has_weights with pruning
// (cross vs quadcross should not matter for this use)
`Matrix' weighted_quadcolsum(`FixedEffects' S, `Matrix' x, `Matrix' y) {
	if (S.has_weights) {
		return(cols(x) < 14 ? quadcross(x :* y, S.weight)' : diagonal(quadcross(x, S.weight, y))')
	}
	return(cols(x) < 25 ? diagonal(quadcross(x, y))' : colsum(x :* y))
}
313
+
314
+
315
+ // RRE benchmarking
316
+ // || yk - y || / || y || === || ek - e || / || y ||
317
// RRE benchmarking: relative residual error against the true residual,
// || yk - y || / || y || === || ek - e || / || y ||
`Real' reghdfe_rre_benchmark(`Vector' resid, `Vector' true_resid, `Real' norm_y) {
	return(norm(resid - true_resid) / norm_y)
}
322
+
323
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_bipartite.mata ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Bipartite Graphs ---------------------------------------------------------
2
+ // - For simplicity, assume the graph represent (firm, ceo) pairs
3
+ // - TODO: Check when we don't need all these objects anymore and clean them up!
4
+
5
+ mata:
6
+
7
+ class BipartiteGraph
8
+ {
9
+ // Computed by init()
10
+ `Boolean' verbose
11
+ `Integer' N // Num. obs
12
+ `Integer' N1 // Num. levels of FE 1
13
+ `Integer' N2 // Num. levels of FE 2
14
+ `Integer' N12 // N1 + N2
15
+ `FactorPointer' PF1
16
+ `FactorPointer' PF2
17
+ `Factor' F12
18
+ `Factor' F12_1
19
+ `Factor' F12_2
20
+
21
+ // Computed by init_zigzag()
22
+ `Vector' queue
23
+ `Vector' stack
24
+ `Vector' keys1_by_2
25
+ `Vector' keys2_by_1
26
+ `Integer' num_subgraphs
27
+ `Variable' subgraph_id // (optional)
28
+
29
+ // Computed by compute_cores()
30
+ `Vector' cores
31
+ `Vector' drop_order
32
+
33
+ // Computed after prune_1core()
34
+ `Integer' N_drop
35
+ `Variable' mask // mask (0|1) of obs that are dropped after prunning of degree-1 edges
36
+ `Boolean' prune // Whether to recursively prune degree-1 edges
37
+ `Vector' drop2idx
38
+ `Matrix' drop2info
39
+ `Variable' sorted_w
40
+ `Boolean' has_weights
41
+ `Variable' sorted_true_weight
42
+
43
+
44
+ // Methods
45
+ `Void' init()
46
+ `Real' init_zigzag()
47
+ `Void' compute_cores()
48
+ `Void' prune_1core()
49
+ `Variables' expand_1core()
50
+ `Variables' partial_out()
51
+ `Variables' __partial_out_map()
52
+ `Variables' __partial_out_laplacian()
53
+ }
54
+
55
+
56
+ `Void' BipartiteGraph::init(`FactorPointer' PF1,
57
+ `FactorPointer' PF2,
58
+ `Boolean' verbose)
59
+ {
60
+ if (verbose) {
61
+ printf("\n{txt}## Initializing bipartite graph\n\n")
62
+ printf(" - FE #1: {res}%s{txt}\n", invtokens((*PF1).varlist))
63
+ printf(" - FE #2: {res}%s{txt}\n", invtokens((*PF2).varlist))
64
+ }
65
+ this.verbose = verbose
66
+ this.PF1 = PF1
67
+ this.PF2 = PF2
68
+
69
+ N = (*PF1).num_obs
70
+ N1 = (*PF1).num_levels
71
+ N2 = (*PF2).num_levels
72
+ N12 = N1 + N2
73
+ (*PF1).panelsetup() // Just in case
74
+ (*PF2).panelsetup() // Just in case
75
+
76
+ // F12 must be created from F1.levels and F2.levels (not from the original keys)
77
+ // This is set automatically by join_factors() with the correct flag:
78
+ // F12 = join_factors(F1, F2, ., ., 1)
79
+ // But you can also run (slower)
80
+ // F12 = _factor( (F1.levels, F2.levels) )
81
+ // asarray(F12.extra, "levels_as_keys", 1)
82
+ if (verbose) printf("{txt} - computing F12: ")
83
+ // join_factors(F1, (*PF2) [, count_levels, save_keys, levels_as_keys])
84
+ F12 = join_factors((*PF1), (*PF2), ., ., 1)
85
+ if (verbose) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12.num_levels)
86
+ F12.panelsetup()
87
+
88
+ if (verbose) printf("{txt} - computing F12_1:")
89
+ // _factor(data [, integers_only, verbose, method, sort_levels, count_levels, hash_ratio, save_keys])
90
+ F12_1 = _factor(F12.keys[., 1], 1, 0, "", 1, 1, ., 0)
91
+ if (verbose) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12_1.num_levels)
92
+ F12_1.panelsetup()
93
+
94
+ if (verbose) printf("{txt} - computing F12_2:")
95
+ F12_2 = _factor(F12.keys[., 2], 1, 0, "", 1, 1, ., 0)
96
+ if (verbose) printf("{txt} edges found: {res}%-10.0gc{txt}\n", F12_2.num_levels)
97
+ F12_2.panelsetup()
98
+ }
99
+
100
+
101
+ // --------------------------------------------------------------------------
102
+ // init_zigzag()
103
+ // --------------------------------------------------------------------------
104
+ // Construct -queue- and -stack- vectors that allow zigzag iteration
105
+ //
106
+ // queue: firm and CEOs that will be processed, in the required order
107
+ // note: negative values indicate CEOs
108
+ //
109
+ // stack: for each firm/CEO, the list of nodes it connects to
110
+ // note: stacks are zero-separated
111
+ //
112
+ // --------------------------------------------------------------------------
113
+ // As a byproduct, also computes the number of disjoint subgraphs.
114
+ // See the algorithm from on Abowd, Creecy and Kramarz (WP 2002) p4. Sketch:
115
+ //
116
+ // g = 0
117
+ // While there are firms w/out a group:
118
+ // g++
119
+ // Assign the first firm w/out a group to group g
120
+ // Repeat until no further changes:
121
+ // Add all persons employed by a firm in g to g
122
+ // Add all firms that employ persons in g to g
123
+ // return(g)
124
+ // --------------------------------------------------------------------------
125
+ // --------------------------------------------------------------------------
126
+ `Real' BipartiteGraph::init_zigzag(| `Boolean' save_subgraphs)
127
+ {
128
+ `Vector' counter1
129
+ `Vector' counter2
130
+ `Vector' done1
131
+ `Vector' done2
132
+
133
+ `Integer' i_stack // use to process the queue
134
+ `Integer' last_i // use to fill out the queue
135
+ `Integer' start_j // use to search for firms to start graph enumeration
136
+ `Integer' i_queue
137
+ `Integer' id // firm number if id>0; error if id=0; ceo number if id<0
138
+ `Integer' j // firm # (or viceversa)
139
+ `Integer' k // ceo # (or viceversa)
140
+ `Integer' c // temporary counter
141
+ `Integer' i // temporary iterator
142
+
143
+ `Matrix' matches // list of CEOs that matched with firm j (or viceversa)
144
+
145
+ if (verbose) printf("\n{txt}## Initializing zigzag iterator for bipartite graph\n\n")
146
+ assert(F12_1.panel_is_setup)
147
+ assert(F12_2.panel_is_setup)
148
+ assert(asarray(F12.extra, "levels_as_keys") == 1)
149
+
150
+ // If subgraph_id (mobility groups) is anything BUT zero, we will save them
151
+ if (args()==0 | save_subgraphs==.) save_subgraphs = 0
152
+ if (save_subgraphs) {
153
+ subgraph_id = J(N2, 1, .)
154
+ }
155
+
156
+ queue = J(N12, 1, 0)
157
+ stack = J(F12.num_levels + N12, 1, .) // there are N12 zeros
158
+ counter1 = J(N1, 1, 0)
159
+ counter2 = J(N2, 1, 0)
160
+
161
+ keys1_by_2 = F12_2.sort(F12.keys[., 1])
162
+ keys2_by_1 = F12_1.sort(F12.keys[., 2])
163
+ done1 = J(N1, 1, 0) // if a firm is already on the queue
164
+ done2 = J(N2, 1, 0) // if a CEO is already on the queue
165
+
166
+ // Use -j- for only for firms and -k- only for CEOs
167
+ // Use -i_queue- to iterate over the queue and -i_stack- over the stack
168
+ // Use -last_i- to fill out the queue (so its the last filled value)
169
+ // Use -i- to iterate arbitrary vectors
170
+ // Use -id- to indicate a possible j or k (negative for k)
171
+ // Use -start_j- to remember where to start searching for new subgraphs
172
+
173
+ i_stack = 0
174
+ last_i = 0
175
+ start_j = 1
176
+ num_subgraphs = 0
177
+
178
+ for (i_queue=1; i_queue<=N12; i_queue++) {
179
+ id = queue[i_queue] // >0 if firm ; <0 if CEO; ==0 if nothing yet
180
+ j = k = . // just to avoid bugs
181
+
182
+ // Pick starting point (useful if the graph is disjoint!)
183
+ if (id == 0) {
184
+ assert(last_i + 1 == i_queue)
185
+ for (j=start_j; j<=N1; j++) {
186
+ if (!done1[j]) {
187
+ queue[i_queue] = id = j
188
+ start_j = j + 1
189
+ ++last_i
190
+ break
191
+ }
192
+ }
193
+ // printf("{txt} - starting subgraph with firm %g\n", j)
194
+ ++num_subgraphs
195
+ assert(id != 0) // Sanity check
196
+ }
197
+
198
+ if (id > 0) {
199
+ // It's a firm
200
+ j = id
201
+ done1[j] = 1
202
+ matches = panelsubmatrix(keys2_by_1, j, F12_1.info)
203
+ for (i=1; i<=rows(matches); i++) {
204
+ k = matches[i]
205
+ c = counter2[k]
206
+ counter2[k] = c + 1
207
+ if (!done2[k]) {
208
+ if (!c) {
209
+ queue[++last_i] = -k
210
+ }
211
+ stack[++i_stack] = k
212
+ }
213
+ }
214
+ stack[++i_stack] = 0
215
+ }
216
+ else {
217
+ // It's a CEO
218
+ k = -id
219
+ done2[k] = 1
220
+ matches = panelsubmatrix(keys1_by_2, k, F12_2.info)
221
+ for (i=1; i<=rows(matches); i++) {
222
+ j = matches[i]
223
+ c = counter1[j]
224
+ counter1[j] = c + 1
225
+ if (!done1[j]) {
226
+ if (!c) {
227
+ queue[++last_i] = j
228
+ }
229
+ stack[++i_stack] = j
230
+ }
231
+ }
232
+ stack[++i_stack] = 0
233
+ if (save_subgraphs) subgraph_id[k] = num_subgraphs
234
+ }
235
+ }
236
+
237
+ // Sanity checks
238
+ assert(counter1 == F12_1.counts)
239
+ assert(counter2 == F12_2.counts)
240
+ assert(!anyof(queue, 0)) // queue can't have zeros at the end
241
+ assert(allof(done1, 1))
242
+ assert(allof(done2, 1))
243
+ assert(!missing(queue))
244
+ assert(!missing(stack))
245
+
246
+ if (save_subgraphs) subgraph_id = subgraph_id[(*PF2).levels]
247
+
248
+ if (verbose) printf("{txt} - disjoint subgraphs found: {res}%g{txt}\n", num_subgraphs)
249
+ return(num_subgraphs)
250
+ }
251
+
252
+
253
+ // --------------------------------------------------------------------------
254
+ // compute_cores()
255
+ // --------------------------------------------------------------------------
256
+ // Computes vertex core numbers, which allows k-core pruning
257
+ // Algorithm used is listed here: https://arxiv.org/abs/cs/0310049
258
+ // --------------------------------------------------------------------------
259
+ // Note:
260
+ // maybe use the k-cores for something useful? eg:
261
+ // we might want to weight the core numbers by the strength (# of obs together)
262
+ // https://arxiv.org/pdf/1611.02756.pdf --> # of butterflies in bipartite graph
263
+ // this paper also has useful data sources for benchmarks
264
+ // # of primary and secondary vertices, edges
265
+ // --------------------------------------------------------------------------
266
+
267
+ `Void' BipartiteGraph::compute_cores()
268
+ {
269
+ `Factor' Fbin
270
+ `Boolean' is_firm
271
+ `Integer' M, ND, j, jj
272
+ `Integer' i_v, i_u, i_w
273
+ `Integer' pv, pu, pw
274
+ `Integer' v, u, w
275
+ `Integer' dv, du
276
+ `Vector' bin, deg, pos, invpos, vert, neighbors
277
+
278
+ if (verbose) printf("\n{txt}## Computing vertex core numbers\n\n")
279
+
280
+ // v, u, w are vertices; <0 for CEOs and >0 for firms
281
+ // vert is sorted by degree; deg is unsorted
282
+ // pos[i] goes from sorted to unsorted, so:
283
+ // vert[i] === original_vert[ pos[i] ]
284
+ // invpos goes from unsorted to sorted, so:
285
+ // vert[invpos[j]] === original_vert[j]
286
+
287
+ // i_u represents the pos. of u in the sorted tables
288
+ // pu represents the pos. of u in the unsorted/original tables
289
+
290
+ assert(F12_1.panel_is_setup==1)
291
+ assert(F12_2.panel_is_setup==1)
292
+ assert(rows(queue)==N12)
293
+ assert(rows(keys1_by_2)==F12.num_levels)
294
+ assert(rows(keys2_by_1)==F12.num_levels)
295
+
296
+ deg = F12_1.counts \ F12_2.counts
297
+ ND = max(deg) // number of degrees
298
+
299
+ Fbin = _factor(deg, 1, 0)
300
+ Fbin.panelsetup()
301
+
302
+ bin = J(ND, 1, 0)
303
+ bin[Fbin.keys] = Fbin.counts
304
+ bin = rows(bin) > 1 ? runningsum(1 \ bin[1..ND-1]) : 1
305
+
306
+ pos = Fbin.p
307
+ invpos = invorder(Fbin.p)
308
+
309
+ vert = Fbin.sort(( (1::N1) \ (-1::-N2) ))
310
+
311
+ for (i_v=1; i_v<=N12; i_v++) {
312
+ v = vert[i_v]
313
+ is_firm = (v > 0)
314
+
315
+ neighbors = is_firm ? panelsubmatrix(keys2_by_1, v, F12_1.info) : panelsubmatrix(keys1_by_2, -v, F12_2.info)
316
+ M = rows(neighbors)
317
+
318
+ for (j=1; j<=M; j++) {
319
+ pv = pos[i_v]
320
+ jj = neighbors[j]
321
+ pu = is_firm ? N1 + jj : jj // is_firm is *not* for the neighbor
322
+ dv = deg[pv]
323
+ du = deg[pu]
324
+
325
+ if (dv < du) {
326
+ i_w = bin[du]
327
+ w = vert[i_w]
328
+ u = is_firm ? -jj : jj // is_firm is *not* for the neighbor
329
+ if (u != w) {
330
+ pw = pos[i_w]
331
+ i_u = invpos[pu]
332
+ pos[i_u] = pw
333
+ pos[i_w] = pu
334
+ vert[i_u] = w
335
+ vert[i_w] = u
336
+ invpos[pu] = i_w
337
+ invpos[pw] = i_u
338
+ }
339
+ bin[du] = bin[du] + 1
340
+ deg[pu] = deg[pu] - 1
341
+ }
342
+ } // end for neighbor u (u ~ v)
343
+ } // end for each node v
344
+
345
+ if (verbose) {
346
+ //printf("{txt} Table: core numbers and vertex count\n")
347
+ Fbin = _factor(deg, 1, 0)
348
+ //printf("\n")
349
+ mm_matlist(Fbin.counts, "%-8.0gc", 2, strofreal(Fbin.keys), "Freq.", "Core #")
350
+ printf("\n")
351
+ }
352
+
353
+ // ((F1.keys \ F2.keys), (F12_1.keys \ -F12_2.keys))[selectindex(deg:==1), .]
354
+
355
+ // Store the values in the class
356
+ swap(drop_order, vert)
357
+ swap(cores, deg)
358
+ }
359
+
360
+ // --------------------------------------------------------------------------
361
+ // prune_1core()
362
+ // --------------------------------------------------------------------------
363
+ // Prune edges with degree-1
364
+ // That is, recursively remove CEOs that only worked at one firm,
365
+ // and firms that only had one CEO in the sample, until every agent
366
+ // in the dataset has at least two matches
367
+ // --------------------------------------------------------------------------
368
+ `Void' BipartiteGraph::prune_1core(| `Variable' weight)
369
+ {
370
+ `Integer' N_drop2, i, j, i1, i2, j1, j2, K_drop2
371
+ `Vector' drop1, drop2
372
+ `Vector' tmp_mask
373
+ `Vector' proj1, proj2
374
+ `Variable' w, tmp_weight
375
+
376
+ has_weights = (args()>0 & rows(weight) > 1)
377
+ if (has_weights) sorted_true_weight = (*PF1).sort(weight)
378
+ tmp_weight = has_weights ? weight : J(N, 1, 1)
379
+
380
+ N_drop = sum(cores :== 1)
381
+ if (!N_drop) {
382
+ if (verbose) printf("{txt} - no 1-core vertices found\n")
383
+ prune = 0
384
+ return
385
+ }
386
+ if (verbose) printf("{txt} - 1-core vertices found: {res}%g{txt}\n", N_drop)
387
+
388
+ drop_order = drop_order[1..N_drop]
389
+ drop1 = `selectindex'(cores[1..N1] :== 1)
390
+ cores = .
391
+ drop1 = (1::N1)[drop1]
392
+ drop2 = -select(drop_order, drop_order:<0)
393
+
394
+ K_drop2 = rows(drop2)
395
+ N_drop2 = K_drop2 ? sum((*PF2).info[drop2, 2] :- (*PF2).info[drop2, 1] :+ 1) : 0
396
+
397
+ tmp_mask = J(N1, 1, 0)
398
+ if (rows(drop1)) tmp_mask[drop1] = J(rows(drop1), 1, 1)
399
+ mask = tmp_mask[(*PF1).levels, 1]
400
+ tmp_mask = J(N2, 1, 0)
401
+ if (K_drop2) tmp_mask[drop2] = J(K_drop2, 1, 1)
402
+ mask = mask :| tmp_mask[(*PF2).levels, 1]
403
+ tmp_mask = .
404
+
405
+ drop2idx = J(N_drop2, 1, .)
406
+ drop2info = J(N2, 2, .)
407
+
408
+ j1 = 1
409
+ for (i=1; i<=K_drop2; i++) {
410
+ j = drop2[i]
411
+ i1 = (*PF2).info[j, 1]
412
+ i2 = (*PF2).info[j, 2]
413
+
414
+ j2 = j1 + i2 - i1
415
+ drop2idx[j1::j2] = i1::i2
416
+ drop2info[j, .] = (j1, j2)
417
+ j1 = j2 + 1
418
+ }
419
+
420
+ if (!(*PF2).is_sorted) {
421
+ assert(((*PF2).p != J(0, 1, .)))
422
+ drop2idx = (*PF2).p[drop2idx, .]
423
+ }
424
+
425
+ if (!(*PF1).is_sorted) {
426
+ assert(((*PF1).inv_p != J(0, 1, .)))
427
+ drop2idx = invorder((*PF1).p)[drop2idx, .]
428
+ }
429
+
430
+ // To undo pruning, I need (*PF1).info[drop1, .] & drop2info & drop2idx
431
+
432
+ // Set weights of pruned obs. to zero
433
+ tmp_weight[`selectindex'(mask)] = J(sum(mask), 1, 0)
434
+
435
+ // Update sorted weights for g=1,2
436
+ w = (*PF1).sort(tmp_weight)
437
+ asarray((*PF1).extra, "has_weights", 1)
438
+ asarray((*PF1).extra, "weights", w)
439
+ asarray((*PF1).extra, "weighted_counts", `panelsum'(w, (*PF1).info))
440
+ w = .
441
+
442
+ w = (*PF2).sort(tmp_weight)
443
+ tmp_weight = . // cleanup
444
+ asarray((*PF2).extra, "has_weights", 1)
445
+ asarray((*PF2).extra, "weights", w)
446
+ asarray((*PF2).extra, "weighted_counts", `panelsum'(w, (*PF2).info))
447
+ w = .
448
+
449
+ // Select obs where both FEs are degree-1 (and thus omitted)
450
+ sorted_w = J(N, 1, 1)
451
+
452
+ proj1 = panelmean((*PF1).sort(sorted_w), *PF1)[(*PF1).levels, .]
453
+ proj2 = panelmean((*PF2).sort(sorted_w), *PF2)[(*PF2).levels, .]
454
+ sorted_w = ((sorted_w - proj1) :!= 1) :| ((sorted_w - proj2) :!= 1)
455
+ proj1 = proj2 = .
456
+ sorted_w = (*PF1).sort(sorted_w)
457
+
458
+ prune = 1
459
+ }
460
+
461
+ // --------------------------------------------------------------------------
462
+ // prune_1core()
463
+ // --------------------------------------------------------------------------
464
+ // Prune edges with degree-1
465
+ // That is, recursively remove CEOs that only worked at one firm,
466
+ // and firms that only had one CEO in the sample, until every agent
467
+ // in the dataset has at least two matches
468
+ // --------------------------------------------------------------------------
469
+ `Variables' BipartiteGraph::expand_1core(`Variables' y)
470
+ {
471
+ `Boolean' zero_weights
472
+ `Variable' sorted_y
473
+ `Integer' i, j, j1, j2, i2, k1, k2, nk
474
+ `Matrix' tmp_y
475
+ `Vector' tmp_w, tmp_idx, new_w
476
+ `RowVector' tmp_mean
477
+
478
+ if (prune==0) return(y)
479
+ if (verbose) printf("\n{txt}## Expanding 2-core into original dataset\n\n")
480
+ assert(N_drop == rows(drop_order))
481
+
482
+ sorted_y = (*PF1).sort(y)
483
+
484
+ i2 = 0
485
+ for (i=N_drop; i>=1; i--) {
486
+ j = drop_order[i]
487
+ if (j > 0) {
488
+ j1 = (*PF1).info[j, 1]
489
+ j2 = (*PF1).info[j, 2]
490
+
491
+ tmp_y = sorted_y[| j1 , 1 \ j2 , . |] // panelsubmatrix(sorted_y, j, (*PF1).info)
492
+ tmp_w = sorted_w[|j1, 1 \ j2, .|] // panelsubmatrix(sorted_w, j, (*PF1).info)
493
+ new_w = has_weights ? sorted_true_weight[|j1, 1 \ j2, .|] : J(j2-j1+1, 1, 1)
494
+ zero_weights = !sum(tmp_w)
495
+ if (!zero_weights) {
496
+ tmp_mean = mean(tmp_y, tmp_w)
497
+ assert(!missing(tmp_mean)) // bugbug remove later
498
+ sorted_y[| j1 , 1 \ j2 , . |] = tmp_y :- tmp_mean
499
+ }
500
+ sorted_w[| j1 , 1 \ j2 , 1 |] = new_w
501
+ }
502
+ else {
503
+ ++i2
504
+ j1 = drop2info[-j, 1]
505
+ j2 = drop2info[-j, 2]
506
+ tmp_idx = drop2idx[| j1 , 1 \ j2 , 1 |]
507
+ tmp_y = sorted_y[tmp_idx, .]
508
+ tmp_w = sorted_w[tmp_idx]
509
+ zero_weights = !sum(tmp_w)
510
+ if (zero_weights) {
511
+ tmp_w = has_weights ? sorted_true_weight[tmp_idx] : J(j2-j1+1, 1, 1)
512
+ }
513
+ tmp_mean = mean(tmp_y, tmp_w)
514
+ assert(!missing(tmp_mean)) // bugbug remove later
515
+ nk = rows(tmp_idx)
516
+ for (k1=1; k1<=nk; k1++) {
517
+ k2 = tmp_idx[k1]
518
+ sorted_y[k2, .] = sorted_y[k2, .] - tmp_mean
519
+ sorted_w[k2] = has_weights ? sorted_true_weight[k2] : 1
520
+ }
521
+ }
522
+ }
523
+
524
+ if (verbose) printf("{txt} - number of coefficients solved triangularly: {res}%s{txt}\n", strofreal(rows(drop_order)))
525
+ return((*PF1).invsort(sorted_y))
526
+ }
527
+
528
+
529
+ `Variables' BipartiteGraph::partial_out(`Variables' y)
530
+ {
531
+
532
+ }
533
+
534
+
535
+ `Variables' BipartiteGraph::__partial_out_map(`Variables' y)
536
+ {
537
+
538
+ }
539
+
540
+
541
+ `Variables' BipartiteGraph::__partial_out_laplacian(`Variables' y)
542
+ {
543
+
544
+ }
545
+
546
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_class.mata ADDED
@@ -0,0 +1,1384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // --------------------------------------------------------------------------
2
+ // FixedEffects main class
3
+ // --------------------------------------------------------------------------
4
+
5
+ mata:
6
+
7
+ class FixedEffects
8
+ {
9
+ // Factors
10
+ `Integer' G // Number of sets of FEs
11
+ `Integer' N // number of obs
12
+ `Integer' M // Sum of all possible FE coefs
13
+ `Factors' factors
14
+ `Vector' sample
15
+ `Varlist' absvars
16
+ `Varlist' ivars
17
+ `Varlist' cvars
18
+ `Boolean' has_intercept
19
+ `RowVector' intercepts
20
+ `RowVector' num_slopes
21
+ `Integer' num_singletons
22
+ `Boolean' save_any_fe
23
+ `Boolean' save_all_fe
24
+ `Varlist' targets
25
+ `RowVector' save_fe
26
+
27
+ // Constant-related (also see -has_intercept-)
28
+ `Boolean' report_constant
29
+ `Boolean' compute_constant
30
+
31
+ // Optimization options
32
+ `Real' tolerance
33
+ `Real' extra_tolerance // Try to achieve this tol if it only takes a few more iters: ceil(10%)
34
+ `Integer' maxiter
35
+ `String' transform // Kaczmarz Cimmino Symmetric_kaczmarz (k c s)
36
+ `String' acceleration // Acceleration method. None/No/Empty is none\
37
+ `Integer' accel_start // Iteration where we start to accelerate // set it at 6? 2?3?
38
+ `string' slope_method
39
+ `Boolean' prune // Whether to recursively prune degree-1 edges
40
+ `Boolean' abort // Raise error if convergence failed?
41
+ `Integer' accel_freq // Specific to Aitken's acceleration
42
+ `Boolean' storing_alphas // 1 if we should compute the alphas/fes
43
+ `Real' conlim // specific to LSMR
44
+ `Real' btol // specific to LSMR
45
+ `Boolean' always_run_lsmr_preconditioner
46
+ `Integer' min_ok
47
+
48
+ // Optimization objects
49
+ `BipartiteGraph' bg // Used when pruning 1-core vertices
50
+ `Vector' pruned_weight // temp. weight for the factors that were pruned
51
+ `Integer' prune_g1 // Factor 1/2 in the bipartite subgraph that gets pruned
52
+ `Integer' prune_g2 // Factor 2/2 in the bipartite subgraph that gets pruned
53
+ `Integer' num_pruned // Number of vertices (levels) that were pruned
54
+
55
+ // Misc
56
+ `Integer' verbose
57
+ `Boolean' timeit
58
+ `Boolean' compact
59
+ `Integer' poolsize
60
+ `Boolean' store_sample
61
+ `Real' finite_condition
62
+ `Real' compute_rre // Relative residual error: || e_k - e || / || e ||
63
+ `Real' rre_depvar_norm
64
+ `Vector' rre_varname
65
+ `Vector' rre_true_residual
66
+ `String' panelvar
67
+ `String' timevar
68
+
69
+ `RowVector' not_basevar // Boolean vector indicating whether each regressor is or not a basevar
70
+ `String' fullindepvars // indepvars including basevars
71
+
72
+ // Weight-specific
73
+ `Boolean' has_weights
74
+ `Variable' weight // unsorted weight
75
+ `String' weight_var // Weighting variable
76
+ `String' weight_type // Weight type (pw, fw, etc)
77
+
78
+ // Absorbed degrees-of-freedom computations
79
+ `Integer' G_extended // Number of intercepts plus slopes
80
+ `Integer' df_a_redundant // e(mobility)
81
+ `Integer' df_a_initial
82
+ `Integer' df_a // df_a_inital - df_a_redundant
83
+ `Vector' doflist_M
84
+ `Vector' doflist_K
85
+ `Vector' doflist_M_is_exact
86
+ `Vector' doflist_M_is_nested
87
+ `Vector' is_slope
88
+ `Integer' df_a_nested // Redundant due to bein nested; used for: r2_a r2_a_within rmse
89
+
90
+ // VCE and cluster variables
91
+ `String' vcetype
92
+ `Integer' num_clusters
93
+ `Varlist' clustervars
94
+ `Varlist' base_clustervars
95
+ `String' vceextra
96
+
97
+ // Regression-specific
98
+ `String' varlist // y x1 x2 x3
99
+ `String' depvar // y
100
+ `String' indepvars // x1 x2 x3
101
+ `String' tousevar
102
+
103
+ `Boolean' drop_singletons
104
+ `String' absorb // contents of absorb()
105
+ `String' select_if // If condition
106
+ `String' select_in // In condition
107
+ `String' model // ols, iv
108
+ `String' summarize_stats
109
+ `Boolean' summarize_quietly
110
+ `StringRowVector' dofadjustments // firstpair pairwise cluster continuous
111
+ `Varname' groupvar
112
+ `String' residuals
113
+ `Variable' residuals_vector
114
+ `RowVector' kept // 1 if the regressors are not deemed as omitted (by partial_out+cholsolve+invsym)
115
+ `String' diopts
116
+
117
+ // Output
118
+ `String' cmdline
119
+ `String' subcmd
120
+ `String' title
121
+ `Boolean' converged
122
+ `Integer' iteration_count // e(ic)
123
+ `Varlist' extended_absvars
124
+ `String' notes
125
+ `Integer' df_r
126
+ `Integer' df_m
127
+ `Integer' N_clust
128
+ `Integer' N_clust_list
129
+ `Real' rss
130
+ `Real' rmse
131
+ `Real' F
132
+ `Real' tss
133
+ `Real' tss_within
134
+ `Real' sumweights
135
+ `Real' r2
136
+ `Real' r2_within
137
+ `Real' r2_a
138
+ `Real' r2_a_within
139
+ `Real' ll
140
+ `Real' ll_0
141
+ `Real' accuracy
142
+ `RowVector' means
143
+ `RowVector' all_stdevs
144
+
145
+ // Methods
146
+ `Void' new()
147
+ `Void' destroy()
148
+ `Void' load_weights() // calls update_sorted_weights, etc.
149
+ `Void' update_sorted_weights()
150
+ `Void' update_cvar_objects()
151
+ `Matrix' partial_out()
152
+ `Matrix' partial_out_pool()
153
+ `Void' _partial_out()
154
+ `Variables' project_one_fe()
155
+ `Void' prune_1core()
156
+ `Void' _expand_1core()
157
+ `Void' estimate_dof()
158
+ `Void' estimate_cond()
159
+ `Void' save_touse()
160
+ `Void' store_alphas()
161
+ `Void' save_variable()
162
+ `Void' post_footnote()
163
+ `Void' post()
164
+ `FixedEffects' reload() // create new instance of object
165
+
166
+ // LSMR-Specific Methods
167
+ `Real' lsmr_norm()
168
+ `Vector' lsmr_A_mult()
169
+ `Vector' lsmr_At_mult()
170
+ }
171
+
172
+
173
+ // Set default value of properties
174
+ `Void' FixedEffects::new()
175
+ {
176
+ num_singletons = .
177
+ sample = J(0, 1, .)
178
+ weight = 1 // set to 1 so cross(x, S.weight, y)==cross(x, y)
179
+
180
+ verbose = 0
181
+ timeit = 0
182
+ compact = 0
183
+ poolsize = .
184
+ finite_condition = .
185
+ residuals = ""
186
+ residuals_vector = .
187
+ panelvar = timevar = ""
188
+ iteration_count = 0
189
+ accuracy = -1 // Epsilon at the time of convergence
190
+
191
+ // Optimization defaults
192
+ slope_method = "invsym"
193
+ maxiter = 1e4
194
+ tolerance = 1e-8
195
+ transform = "symmetric_kaczmarz"
196
+ acceleration = "conjugate_gradient"
197
+ accel_start = 6
198
+ conlim = 1e+8 // lsmr only
199
+ btol = 1e-8 // lsmr only (note: atol is just tolerance)
200
+ always_run_lsmr_preconditioner = 0
201
+ min_ok = 1
202
+
203
+ prune = 0
204
+ converged = 0
205
+ abort = 1
206
+ storing_alphas = 0
207
+ report_constant = compute_constant = 1
208
+
209
+ // Specific to Aitken:
210
+ accel_freq = 3
211
+
212
+ not_basevar = J(1, 0, .)
213
+
214
+ means = all_stdevs = J(1, 0, .) // necessary with pool() because we append to it
215
+ kept = J(1, 0, .) // necessary with pool() because we append to it
216
+ }
217
+
218
+
219
+ `Void' FixedEffects::destroy()
220
+ {
221
+ // stata(sprintf("cap drop %s", tousevar))
222
+ }
223
+
224
+
225
+ // This adds/removes weights or changes their type
226
+ `Void' FixedEffects::load_weights(`String' weighttype, `String' weightvar, `Variable' weight, `Boolean' verbose)
227
+ {
228
+ `Integer' g
229
+ `FactorPointer' pf
230
+ `Matrix' precond // used for lsmr
231
+ `Varname' cvars_g
232
+
233
+ this.has_weights = (weighttype != "" & weightvar != "")
234
+ if (this.verbose > 0 & verbose > 0 & this.has_weights) printf("{txt}## Loading weights [%s=%s]\n", weighttype, weightvar)
235
+
236
+ // Update main properties
237
+ this.weight_var = weightvar
238
+ this.weight_type = weighttype
239
+
240
+ // Update booleans
241
+ for (g=1; g<=this.G; g++) {
242
+ asarray(this.factors[g].extra, "has_weights", this.has_weights)
243
+ }
244
+
245
+ // Optionally load weight from dataset
246
+ if (this.has_weights & weight==J(0,1,.)) {
247
+ weight = st_data(this.sample, this.weight_var)
248
+ }
249
+
250
+ // Update weight vectors
251
+ if (this.has_weights) {
252
+ if (this.verbose > 0 & verbose > 0) printf("{txt}## Sorting weights for each absvar\n")
253
+ this.update_sorted_weights(weight)
254
+ }
255
+ else {
256
+ // If no weights, clear this up
257
+ this.weight = 1 // same as defined by new()
258
+ for (g=1; g<=this.G; g++) {
259
+ asarray(this.factors[g].extra, "weights", .)
260
+ asarray(this.factors[g].extra, "weighted_counts", .)
261
+ }
262
+ }
263
+
264
+ // Update cvar objects (do AFTER updating weights!)
265
+ // (this is meaningless with iweights)
266
+ if (weighttype != "iweight") this.update_cvar_objects()
267
+
268
+ // Preconditioners for LSMR
269
+ if (acceleration=="lsmr" | always_run_lsmr_preconditioner) {
270
+
271
+ // Compute M
272
+ M = 0
273
+ for (g=1; g<=G; g++) {
274
+ M = M + factors[g].num_levels * (intercepts[g] + num_slopes[g])
275
+ }
276
+
277
+ // Preconditioner
278
+ for (g=1; g<=G; g++) {
279
+ pf = &(factors[g])
280
+ if (intercepts[g]) {
281
+ precond = has_weights ? asarray((*pf).extra, "weighted_counts") : (*pf).counts
282
+ asarray((*pf).extra, "precond_intercept", sqrt(1 :/ precond))
283
+ }
284
+
285
+ if (num_slopes[g]) {
286
+ cvars_g = tokens(this.cvars[g])
287
+ precond = st_data(this.sample, cvars_g)
288
+ precond = reghdfe_panel_precondition(precond, (*pf))
289
+ asarray((*pf).extra, "precond_slopes", precond)
290
+ }
291
+
292
+ precond = .
293
+ }
294
+ }
295
+
296
+ }
297
+
298
+
299
+ // This just updates the weight but doesn't change the type or variable of the weight
300
+ `Void' FixedEffects::update_sorted_weights(`Variable' weight)
301
+ {
302
+ `Integer' g
303
+ `Real' min_w
304
+ `Variable' w
305
+ `FactorPointer' pf
306
+
307
+ assert_msg(!hasmissing(weight), "weights can't be missing")
308
+ this.weight = weight
309
+ assert(rows(weight)==rows(sample))
310
+ if (verbose > 0) printf("{txt} - loading %s weight from variable %s\n", weight_type, weight_var)
311
+ for (g=1; g<=G; g++) {
312
+ if (verbose > 0) printf("{txt} - sorting weight for factor {res}%s{txt}\n", absvars[g])
313
+ pf = &(factors[g])
314
+ w = (*pf).sort(weight)
315
+
316
+ // Rescale weights so there are no weights below 1
317
+ if (weight_type != "fweight") {
318
+ min_w = colmin(w)
319
+ if (min_w < 1e-6) min_w = 1e-6 // Prevent bugs if a weight is very close to zero
320
+ //assert_msg(min_w > 0, "weights must be positive")
321
+ //if (min_w <= 0) printf("{err} not all weights are positive\n")
322
+ if (0 < min_w & min_w < 1) {
323
+ w = w :/ min_w
324
+ }
325
+ }
326
+
327
+ asarray((*pf).extra, "weights", w)
328
+ asarray((*pf).extra, "weighted_counts", `panelsum'(w, (*pf).info))
329
+ }
330
+ }
331
+
332
+
333
+ `Void' FixedEffects::update_cvar_objects()
334
+ {
335
+ `Integer' g
336
+ `FactorPointer' pf
337
+
338
+ for (g=1; g<=G; g++) {
339
+ pf = &(factors[g])
340
+ // Update mean(z; w) and inv(z'z; w) where z is a slope variable and w is the weight
341
+ if (num_slopes[g]) {
342
+ if (verbose > 0) printf("{txt} - precomputing cvar objects for factor {res}%s{txt}\n", absvars[g])
343
+ if (intercepts[g]) {
344
+ asarray((*pf).extra, "xmeans",
345
+ panelmean(asarray((*pf).extra, "x"), *pf))
346
+ }
347
+ asarray((*pf).extra, "inv_xx", precompute_inv_xx(*pf, intercepts[g]))
348
+ }
349
+ }
350
+ }
351
+
352
+
353
+ `Variables' FixedEffects::partial_out(`Anything' data,
354
+ | `Boolean' save_tss,
355
+ `Boolean' standardize_data,
356
+ `Boolean' first_is_depvar)
357
+ {
358
+ // -data- is either a varlist or a matrix
359
+ `Variables' y
360
+ `Varlist' vars
361
+ `Integer' i
362
+ `Integer' k
363
+
364
+ if (args()<2 | save_tss==.) save_tss = 0
365
+ if (args()<3 | standardize_data==.) standardize_data = 1
366
+ if (args()<4 | first_is_depvar==.) first_is_depvar = 1
367
+
368
+ if (eltype(data) == "string") {
369
+ vars = tokens(invtokens(data)) // tweak to allow string scalars and string vectors
370
+ k = cols(vars)
371
+
372
+ if (poolsize < k) {
373
+ if (verbose > 0) printf("\n{txt}## Loading and partialling out %g variables in blocks of %g\n\n", k, poolsize)
374
+ if (timeit) timer_on(50)
375
+ partial_out_pool(vars, save_tss, standardize_data, first_is_depvar, poolsize, y=.)
376
+ if (timeit) timer_off(50)
377
+ }
378
+ else {
379
+ if (verbose > 0) printf("\n{txt}## Partialling out %g variables: {res}%s{txt}\n\n", cols(vars), invtokens(vars))
380
+ if (verbose > 0) printf("{txt} - Loading variables into Mata\n")
381
+ if (timeit) timer_on(50)
382
+ _st_data_wrapper(sample, invtokens(vars), y=., verbose)
383
+ if (timeit) timer_off(50)
384
+
385
+ // Workaround to odd Stata quirk
386
+ if (timeit) timer_on(51)
387
+ if (cols(y) > cols(vars)) {
388
+ printf("{err}(some empty columns were added due to a bug/quirk in {bf:st_data()}; %g cols created instead of %g for {it:%s}; running slower workaround)\n", cols(y), cols(vars), invtokens(vars))
389
+ partial_out_pool(vars, save_tss, standardize_data, first_is_depvar, 1, y=.)
390
+ }
391
+ else {
392
+ _partial_out(y, save_tss, standardize_data, first_is_depvar)
393
+ }
394
+ if (timeit) timer_off(51)
395
+
396
+ }
397
+ }
398
+ else {
399
+ if (verbose > 0) printf("\n{txt}## Partialling out %g variables\n\n", cols(data))
400
+ if (timeit) timer_on(54)
401
+ _partial_out(y=data, save_tss, standardize_data, first_is_depvar)
402
+ if (timeit) timer_off(54)
403
+ }
404
+
405
+ if (verbose==0) printf(`"{txt}({browse "http://scorreia.com/research/hdfe.pdf":MWFE estimator} converged in %s iteration%s)\n"', strofreal(iteration_count), iteration_count > 1 ? "s" : "s")
406
+ return(y)
407
+ }
408
+
409
+
410
+
411
+ `Variables' FixedEffects::partial_out_pool(`Anything' vars,
412
+ `Boolean' save_tss,
413
+ `Boolean' standardize_data,
414
+ `Boolean' first_is_depvar,
415
+ `Integer' step,
416
+ `Variables' y)
417
+ {
418
+ `Variables' part_y
419
+ `Integer' i, j, ii
420
+ `Integer' k
421
+ `StringRowVector' keepvars
422
+
423
+ k = cols(vars)
424
+ assert(step > 0)
425
+ assert(step < k)
426
+ y = J(rows(sample), 0, .)
427
+
428
+ for (i=1; i<=k; i=i+step) {
429
+
430
+ j = i + step - 1
431
+ if (j>k) j = k
432
+
433
+ // Load data
434
+ _st_data_wrapper(sample, vars[i..j], part_y=., verbose)
435
+
436
+ if (cols(part_y) > j - i + 1) {
437
+ printf("{err}(some empty columns were added due to a bug/quirk in {bf:st_data()}; running slower workaround)\n")
438
+ if (timeit) timer_on(51)
439
+ part_y = J(rows(sample), 0, .)
440
+ for (ii=i; ii<=j; ii++) {
441
+ part_y = part_y, st_data(sample, vars[ii])
442
+ }
443
+ if (timeit) timer_off(51)
444
+ }
445
+
446
+ // Drop loaded vars as quickly as possible
447
+ if (compact) {
448
+ // st_dropvar(vars[i..j]) // bugbug what if repeated??
449
+ keepvars = base_clustervars , timevar, panelvar, (j == k ? "" : vars[j+1..k])
450
+ keepvars = tokens(invtokens(keepvars))
451
+ if (cols(keepvars)) {
452
+ stata(sprintf("fvrevar %s, list", invtokens(keepvars)))
453
+ stata(sprintf("keep %s", st_global("r(varlist)")))
454
+ }
455
+ else {
456
+ stata("clear")
457
+ }
458
+ }
459
+
460
+ _partial_out(part_y, save_tss, standardize_data, first_is_depvar)
461
+ y = y, part_y
462
+ part_y = .
463
+ }
464
+ }
465
+
466
+
467
+ `Void' FixedEffects::store_alphas(`Anything' d_varname)
468
+ {
469
+ `Integer' g, i, j
470
+ `StringRowVector' varlabel
471
+ `Variable' d
472
+ `RowVector' tmp_stdev
473
+
474
+ if (verbose > 0) printf("\n{txt}## Storing estimated fixed effects\n\n")
475
+
476
+ // -d- can be either the data or the variable name
477
+
478
+ // Load -d- variable
479
+ if (eltype(d_varname) == "string") {
480
+ if (verbose > 0) printf("{txt} - Loading d = e(depvar) - xb - e(resid)\n")
481
+ d = st_data(sample, d_varname)
482
+ }
483
+ else {
484
+ d = d_varname
485
+ }
486
+ assert(!missing(d))
487
+
488
+ // Create empty alphas
489
+ if (verbose > 0) printf("{txt} - Initializing alphas\n")
490
+ for (g=j=1; g<=G; g++) {
491
+ if (!save_fe[g]) continue
492
+ asarray(factors[g].extra, "alphas", J(factors[g].num_levels, intercepts[g] + num_slopes[g], 0))
493
+ asarray(factors[g].extra, "tmp_alphas", J(factors[g].num_levels, intercepts[g] + num_slopes[g], 0))
494
+ }
495
+
496
+ // Fill out alphas
497
+ if (verbose > 0) printf("{txt} - Computing alphas\n")
498
+ storing_alphas = 1
499
+ converged = 0
500
+ d = accelerate_sd(this, d, &transform_kaczmarz())
501
+ storing_alphas = 0
502
+
503
+ if (verbose > 0) printf("{txt} - SSR of d wrt FEs: %g\n", quadcross(d,d))
504
+
505
+ // Store alphas in dataset
506
+ if (verbose > 0) printf("{txt} - Creating varlabels\n")
507
+ for (g=j=1; g<=G; g++) {
508
+ if (!save_fe[g]) {
509
+ j = j + intercepts[g] + num_slopes[g]
510
+ continue
511
+ }
512
+ varlabel = J(1, intercepts[g] + num_slopes[g], "")
513
+ for (i=1; i<=cols(varlabel); i++) {
514
+ varlabel[i] = sprintf("[FE] %s", extended_absvars[j])
515
+ j++
516
+ }
517
+
518
+ if (num_slopes[g]) {
519
+ if (verbose > 0) printf("{txt} - Recovering unstandardized variables\n")
520
+ tmp_stdev = asarray(factors[g].extra, "x_stdevs")
521
+ if (intercepts[g]) tmp_stdev = 1, tmp_stdev
522
+
523
+ // We need to *divide* the coefs by the stdev, not multiply!
524
+ asarray(factors[g].extra, "alphas",
525
+ asarray(factors[g].extra, "alphas") :/ tmp_stdev
526
+ )
527
+ }
528
+
529
+ if (verbose > 0) printf("{txt} - Storing alphas in dataset\n")
530
+ save_variable(targets[g], asarray(factors[g].extra, "alphas")[factors[g].levels, .], varlabel)
531
+ asarray(factors[g].extra, "alphas", .)
532
+ asarray(factors[g].extra, "tmp_alphas", .)
533
+ }
534
+ }
535
+
536
+
537
+ `Void' FixedEffects::_partial_out(`Variables' y,
538
+ | `Boolean' save_tss,
539
+ `Boolean' standardize_data,
540
+ `Boolean' first_is_depvar,
541
+ `Boolean' flush)
542
+ {
543
+ `RowVector' stdevs, needs_zeroing, kept2
544
+ `FunctionP' funct_transform, func_accel // transform
545
+ `Real' y_mean, collinear_tol
546
+ `Vector' lhs
547
+ `Vector' alphas
548
+ `StringRowVector' vars
549
+ `Integer' i
550
+
551
+ if (args()<2 | save_tss==.) save_tss = 0
552
+ if (args()<3 | standardize_data==.) standardize_data = 1
553
+ if (args()<4 | first_is_depvar==.) first_is_depvar = 1
554
+ if (args()<5 | flush==.) flush = 0 // whether or not to flush the values of means & kept
555
+
556
+ assert(anyof((0, 1, 2), standardize_data)) // 0=Don't standardize; 1=Std. and REVERT after partial; 2=Std., partial, and KEEP STANDARDIZED
557
+
558
+ if (flush) {
559
+ iteration_count = 0
560
+ accuracy = -1
561
+ means = stdevs = J(1, 0, .)
562
+ kept = J(1, 0, .)
563
+ }
564
+
565
+ // Shortcut for trivial case (1 FE)
566
+ if (G==1) acceleration = "none"
567
+
568
+ // Solver Warnings
569
+ if (transform=="kaczmarz" & acceleration=="conjugate_gradient") {
570
+ printf("{err}(WARNING: convergence is {bf:unlikely} with transform=kaczmarz and accel=CG)\n")
571
+ }
572
+
573
+ // Load transform pointer
574
+ if (transform=="cimmino") funct_transform = &transform_cimmino()
575
+ if (transform=="kaczmarz") funct_transform = &transform_kaczmarz()
576
+ if (transform=="symmetric_kaczmarz") funct_transform = &transform_sym_kaczmarz()
577
+ if (transform=="random_kaczmarz") funct_transform = &transform_rand_kaczmarz()
578
+
579
+ // Pointer to acceleration routine
580
+ if (acceleration=="test") func_accel = &accelerate_test()
581
+ if (acceleration=="none") func_accel = &accelerate_none()
582
+ if (acceleration=="conjugate_gradient") func_accel = &accelerate_cg()
583
+ if (acceleration=="steepest_descent") func_accel = &accelerate_sd()
584
+ if (acceleration=="aitken") func_accel = &accelerate_aitken()
585
+ if (acceleration=="hybrid") func_accel = &accelerate_hybrid()
586
+
587
+ // Compute TSS of depvar
588
+ if (timeit) timer_on(60)
589
+ if (save_tss & tss==.) {
590
+ lhs = y[., 1]
591
+ if (has_intercept) {
592
+ y_mean = mean(lhs, weight)
593
+ tss = crossdev(lhs, y_mean, weight, lhs, y_mean) // Sum of w[i] * (y[i]-y_mean) ^ 2
594
+ }
595
+ else {
596
+ tss = cross(lhs, weight, lhs) // Sum of w[i] * y[i] ^ 2
597
+ }
598
+ lhs = .
599
+ if (weight_type=="aweight" | weight_type=="pweight") tss = tss * rows(y) / sum(weight)
600
+ }
601
+ if (timeit) timer_off(60)
602
+
603
+
604
+ // Compute 2-norm of each var, to see if we need to drop as regressors
605
+ kept2 = diagonal(cross(y, y))'
606
+
607
+ // Compute and save means of each var
608
+ means = means , ( compute_constant ? mean(y, weight) : J(1, cols(y), 1) )
609
+
610
+ // Intercept LSMR case
611
+ if (acceleration=="lsmr") {
612
+ // RRE benchmarking
613
+ if (compute_rre) rre_depvar_norm = norm(y[., 1])
614
+ if (cols(y)==1) {
615
+ y = lsmr(this, y, alphas=.)
616
+ alphas = . // or return them!
617
+ }
618
+ else {
619
+ for (i=1; i<=cols(y); i++) {
620
+ y[., i] = lsmr(this, y[., i], alphas=.)
621
+ }
622
+ alphas = .
623
+ }
624
+ }
625
+ else {
626
+
627
+ // Standardize variables
628
+ if (timeit) timer_on(61)
629
+ if (standardize_data) {
630
+ if (verbose > 0) printf("{txt} - Standardizing variables\n")
631
+ stdevs = reghdfe_standardize(y)
632
+ all_stdevs = all_stdevs, stdevs
633
+ kept2 = kept2 :/ stdevs :^ 2
634
+ }
635
+ if (timeit) timer_off(61)
636
+
637
+ // RRE benchmarking
638
+ if (compute_rre) {
639
+ rre_true_residual = rre_true_residual / (standardize_data ? stdevs[1] : 1)
640
+ rre_depvar_norm = norm(y[., 1])
641
+ }
642
+
643
+ // Solve
644
+ if (verbose>0) printf("{txt} - Running solver (acceleration={res}%s{txt}, transform={res}%s{txt} tol={res}%-1.0e{txt})\n", acceleration, transform, tolerance)
645
+ if (verbose==1) printf("{txt} - Iterating:")
646
+ if (verbose>1) printf("{txt} ")
647
+ converged = 0 // converged will get updated by check_convergence()
648
+
649
+ if (timeit) timer_on(62)
650
+ if (G==1 & factors[1].method=="none" & num_slopes[1]==0 & !(storing_alphas & save_fe[1])) {
651
+ // Speedup for constant-only case (no fixed effects)
652
+ assert(factors[1].num_levels == 1)
653
+ iteration_count = 1
654
+ accuracy = 0
655
+ if (standardize_data == 1) {
656
+ y = stdevs :* y :- stdevs :* mean(y, has_weights ? asarray(factors[1].extra, "weights") : 1) // Undoing standardization
657
+ }
658
+ else {
659
+ y = y :- mean(y, has_weights ? asarray(factors[1].extra, "weights") : 1)
660
+ }
661
+ }
662
+ else {
663
+ if (standardize_data == 1) {
664
+ y = (*func_accel)(this, y, funct_transform) :* stdevs // Undoing standardization
665
+ }
666
+ else {
667
+ y = (*func_accel)(this, y, funct_transform) // 'this' is like python's self
668
+ }
669
+ }
670
+ if (timeit) timer_off(62)
671
+
672
+ if (prune) {
673
+ assert_msg(G==2, "prune option requires only two FEs")
674
+ if (timeit) timer_on(63)
675
+ _expand_1core(y)
676
+ if (timeit) timer_off(63)
677
+ }
678
+ }
679
+
680
+ assert_msg(!hasmissing(y), "error partialling out; missing values found")
681
+
682
+ // Standardizing makes it hard to detect values that are perfectly collinear with the absvars
683
+ // in which case they should be 0.00 but they end up as e.g. 1e-16
684
+ // EG: reghdfe price ibn.foreign , absorb(foreign)
685
+
686
+ // This will edit to zero entire columns where *ALL* values are very close to zero
687
+ if (timeit) timer_on(64)
688
+ vars = cols(varlist) > 1 ? varlist : tokens(varlist)
689
+ if (cols(vars)!=cols(y)) vars ="variable #" :+ strofreal(1..cols(y))
690
+ collinear_tol = min(( 1e-6 , tolerance / 10))
691
+
692
+ kept2 = (diagonal(cross(y, y))' :/ kept2) :> (collinear_tol)
693
+ if (first_is_depvar & kept2[1]==0) {
694
+ kept2[1] = 1
695
+ if (verbose > -1) printf("{txt}warning: %s might be perfectly explained by fixed effects (tol =%3.1e)\n", vars[1], collinear_tol)
696
+ }
697
+ needs_zeroing = `selectindex'(!kept2)
698
+ if (cols(needs_zeroing)) {
699
+ y[., needs_zeroing] = J(rows(y), cols(needs_zeroing), 0)
700
+ for (i=1; i<=cols(vars); i++) {
701
+ if (!kept2[i] & verbose>-1 & (i > 1 | !first_is_depvar)) {
702
+ printf("{txt}note: {res}%s{txt} is probably collinear with the fixed effects (all partialled-out values are close to zero; tol =%3.1e)\n", vars[i], collinear_tol)
703
+ }
704
+ }
705
+ }
706
+
707
+ kept = kept, kept2
708
+ if (timeit) timer_off(64)
709
+ }
710
+
711
+
712
+ `Variables' FixedEffects::project_one_fe(`Variables' y, `Integer' g)
713
+ {
714
+ `Factor' f
715
+ `Boolean' store_these_alphas
716
+ `Matrix' alphas, proj_y
717
+
718
+ // Cons+K+W, Cons+K, K+W, K, Cons+W, Cons = 6 variants
719
+
720
+ f = factors[g]
721
+ store_these_alphas = storing_alphas & save_fe[g]
722
+ if (store_these_alphas) assert(cols(y)==1)
723
+
724
+ if (num_slopes[g]==0) {
725
+ if (store_these_alphas) {
726
+ alphas = panelmean(f.sort(y), f)
727
+ asarray(factors[g].extra, "tmp_alphas", alphas)
728
+ return(alphas[f.levels, .])
729
+ }
730
+ else {
731
+ if (cols(y)==1 & f.num_levels > 1) {
732
+ return(panelmean(f.sort(y), f)[f.levels])
733
+ }
734
+ else {
735
+ return(panelmean(f.sort(y), f)[f.levels, .])
736
+ }
737
+ }
738
+ }
739
+ else {
740
+ // This includes both cases, with and w/out intercept (## and #)
741
+ if (store_these_alphas) {
742
+ alphas = J(f.num_levels, intercepts[g] + num_slopes[g], .)
743
+ proj_y = panelsolve_invsym(f.sort(y), f, intercepts[g], alphas)
744
+ asarray(factors[g].extra, "tmp_alphas", alphas)
745
+ return(proj_y)
746
+ }
747
+ else {
748
+ return(panelsolve_invsym(f.sort(y), f, intercepts[g]))
749
+ }
750
+ }
751
+ }
752
+
753
+
754
+ `Void' FixedEffects::estimate_dof()
755
+ {
756
+ `Boolean' has_int
757
+ `Integer' g, h // index FEs (1..G)
758
+ `Integer' num_intercepts // Number of absvars with an intercept term
759
+ `Integer' i_cluster, i_intercept, j_intercept
760
+ `Integer' i // index 1..G_extended
761
+ `Integer' j
762
+ `Integer' bg_verbose // verbose level when calling BipartiteGraph()
763
+ `Integer' m // Mobility groups between a specific pair of FEs
764
+ `RowVector' SubGs
765
+ `RowVector' offsets, idx, zeros, results
766
+ `Matrix' tmp
767
+ `Variables' data
768
+ `DataCol' cluster_data
769
+ `String' absvar, clustervar
770
+ `Factor' F
771
+ `BipartiteGraph' BG
772
+ `Integer' pair_count
773
+
774
+ if (verbose > 0) printf("\n{txt}## Estimating degrees-of-freedom absorbed by the fixed effects\n\n")
775
+
776
+ // Count all FE intercepts and slopes
777
+ SubGs = intercepts + num_slopes
778
+ G_extended = sum(SubGs)
779
+ num_intercepts = sum(intercepts)
780
+ offsets = runningsum(SubGs) - SubGs :+ 1 // start of each FE within the extended list
781
+ idx = `selectindex'(intercepts) // Select all FEs with intercepts
782
+ if (verbose > 0) printf("{txt} - there are %f fixed intercepts and slopes in the %f absvars\n", G_extended, G)
783
+
784
+ // Initialize result vectors and scalars
785
+ doflist_M_is_exact = J(1, G_extended, 0)
786
+ doflist_M_is_nested = J(1, G_extended, 0)
787
+ df_a_nested = 0
788
+
789
+ // (1) M will hold the redundant coefs for each extended absvar (G_extended, not G)
790
+ doflist_M = J(1, G_extended, 0)
791
+ assert(0 <= num_clusters & num_clusters <= 10)
792
+ if (num_clusters > 0 & anyof(dofadjustments, "clusters")) {
793
+
794
+ // (2) (Intercept-Only) Look for absvars that are clustervars
795
+ for (i_intercept=1; i_intercept<=length(idx); i_intercept++) {
796
+ g = idx[i_intercept]
797
+ i = offsets[g]
798
+ absvar = invtokens(tokens(ivars[g]), "#")
799
+ if (anyof(clustervars, absvar)) {
800
+ doflist_M[i] = factors[g].num_levels
801
+ df_a_nested = df_a_nested + doflist_M[i]
802
+ doflist_M_is_exact[i] = doflist_M_is_nested[i] = 1
803
+ idx[i_intercept] = 0
804
+ if (verbose > 0) printf("{txt} - categorical variable {res}%s{txt} is also a cluster variable, so it doesn't reduce DoF\n", absvar)
805
+ }
806
+ }
807
+ idx = select(idx, idx)
808
+
809
+ // (3) (Intercept-Only) Look for absvars that are nested within a clustervar
810
+ for (i_cluster=1; i_cluster<= num_clusters; i_cluster++) {
811
+ cluster_data = .
812
+ if (!length(idx)) break // no more absvars to process
813
+ for (i_intercept=1; i_intercept<=length(idx); i_intercept++) {
814
+
815
+ g = idx[i_intercept]
816
+ i = offsets[g]
817
+ absvar = invtokens(tokens(ivars[g]), "#")
818
+ clustervar = clustervars[i_cluster]
819
+ if (doflist_M_is_exact[i]) continue // nothing to do
820
+
821
+ if (cluster_data == .) {
822
+ if (strpos(clustervar, "#")) {
823
+ clustervar = subinstr(clustervars[i_cluster], "#", " ", .)
824
+ F = factor(clustervar, sample, ., "", 0, 0, ., 0)
825
+ cluster_data = F.levels
826
+ F = Factor() // clear
827
+ }
828
+ else {
829
+ cluster_data = __fload_data(clustervar, sample, 0)
830
+ }
831
+ }
832
+
833
+ if (factors[g].nested_within(cluster_data)) {
834
+ doflist_M[i] = factors[g].num_levels
835
+ doflist_M_is_exact[i] = doflist_M_is_nested[i] = 1
836
+ df_a_nested = df_a_nested + doflist_M[i]
837
+ idx[i_intercept] = 0
838
+ if (verbose > 0) printf("{txt} - categorical variable {res}%s{txt} is nested within a cluster variable, so it doesn't reduce DoF\n", absvar)
839
+ }
840
+ }
841
+ idx = select(idx, idx)
842
+ }
843
+ cluster_data = . // save memory
844
+ } // end of the two cluster checks (absvar is clustervar; absvar is nested within clustervar)
845
+
846
+
847
+ // (4) (Intercept-Only) Every intercept but the first has at least one redundant coef.
848
+ if (length(idx) > 1) {
849
+ if (verbose > 0) printf("{txt} - there is at least one redundant coef. for every set of FE intercepts after the first one\n")
850
+ doflist_M[offsets[idx[2..length(idx)]]] = J(1, length(idx)-1, 1) // Set DoF loss of all intercepts but the first one to 1
851
+ }
852
+
853
+ // (5) (Intercept-only) Mobility group algorithm
854
+ // Excluding those already solved, the first absvar is exact
855
+
856
+ if (length(idx)) {
857
+ i = idx[1]
858
+ doflist_M_is_exact[i] = 1
859
+ }
860
+
861
+ // Compute number of dijsoint subgraphs / mobility groups for each pair of remaining FEs
862
+ if (anyof(dofadjustments, "firstpair") | anyof(dofadjustments, "pairwise")) {
863
+ BG = BipartiteGraph()
864
+ bg_verbose = max(( verbose - 1 , 0 ))
865
+ pair_count = 0
866
+
867
+ for (i_intercept=1; i_intercept<=length(idx)-1; i_intercept++) {
868
+ for (j_intercept=i_intercept+1; j_intercept<=length(idx); j_intercept++) {
869
+ g = idx[i_intercept]
870
+ h = idx[j_intercept]
871
+ i = offsets[h]
872
+ BG.init(&factors[g], &factors[h], bg_verbose)
873
+ m = BG.init_zigzag()
874
+ ++pair_count
875
+ if (verbose > 0) printf("{txt} - mobility groups between FE intercepts #%f and #%f: {res}%f\n", g, h, m)
876
+ doflist_M[i] = max(( doflist_M[i] , m ))
877
+ if (j_intercept==2) doflist_M_is_exact[i] = 1
878
+ if (pair_count & anyof(dofadjustments, "firstpair")) break
879
+ }
880
+ if (pair_count & anyof(dofadjustments, "firstpair")) break
881
+ }
882
+ BG = BipartiteGraph() // clear
883
+ }
884
+ // TODO: add group3hdfe
885
+
886
+ // (6) See if cvars are zero (w/out intercept) or just constant (w/intercept)
887
+ if (anyof(dofadjustments, "continuous")) {
888
+ for (i=g=1; g<=G; g++) {
889
+ // If model has intercept, redundant cvars are those that are CONSTANT
890
+ // Without intercept, a cvar has to be zero within a FE for it to be redundant
891
+ // Since S.fes[g].x are already demeaned IF they have intercept, we don't have to worry about the two cases
892
+ has_int = intercepts[g]
893
+ if (has_int) i++
894
+ if (!num_slopes[g]) continue
895
+
896
+ data = asarray(factors[g].extra, "x")
897
+ assert(num_slopes[g]==cols(data))
898
+ results = J(1, cols(data), 0)
899
+ // float(1.1) - 1 == 2.384e-08 , so let's pick something bigger, 1e-6
900
+ zeros = J(1, cols(data), 1e-6)
901
+ // This can be speed up by moving the -if- outside the -for-
902
+ for (j = 1; j <= factors[g].num_levels; j++) {
903
+ tmp = colminmax(panelsubmatrix(data, j, factors[g].info))
904
+ if (has_int) {
905
+ results = results + ((tmp[2, .] - tmp[1, .]) :<= zeros)
906
+ }
907
+ else {
908
+ results = results + (colsum(abs(tmp)) :<= zeros)
909
+ }
910
+ }
911
+ data = .
912
+ if (sum(results)) {
913
+ if (has_int & verbose) printf("{txt} - the slopes in the FE #%f are constant for {res}%f{txt} levels, which don't reduce DoF\n", g, sum(results))
914
+ if (!has_int & verbose) printf("{txt} - the slopes in the FE #%f are zero for {res}%f{txt} levels, which don't reduce DoF\n", g, sum(results))
915
+ doflist_M[i..i+num_slopes[g]-1] = results
916
+ }
917
+ i = i + num_slopes[g]
918
+ }
919
+ }
920
+
921
+ // Store results (besides doflist_..., etc.)
922
+ doflist_K = J(1, G_extended, .)
923
+ for (g=1; g<=G; g++) {
924
+ i = offsets[g]
925
+ j = g==G ? G_extended : offsets[g+1]
926
+ doflist_K[i..j] = J(1, j-i+1, factors[g].num_levels)
927
+ }
928
+ df_a_initial = sum(doflist_K)
929
+ df_a_redundant = sum(doflist_M)
930
+ df_a = df_a_initial - df_a_redundant
931
+ }
932
+
933
+
934
+
935
+ `Void' FixedEffects::prune_1core()
936
+ {
937
+ // Note that we can't prune degree-2 nodes, or the graph stops being bipartite
938
+ `Integer' i, j, g
939
+ `Vector' subgraph_id
940
+
941
+ `Vector' idx
942
+ `RowVector' i_prune
943
+
944
+ // For now; too costly to use prune for G=3 and higher
945
+ // (unless there are *a lot* of degree-1 vertices)
946
+ if (G!=2) return //assert_msg(G==2, "G==2") // bugbug remove?
947
+
948
+ // Abort if the user set HDFE.prune = 0
949
+ if (!prune) return
950
+
951
+ // Pick two factors, and check if we really benefit from pruning
952
+ prune = 0
953
+ i_prune = J(1, 2, 0)
954
+ for (g=i=1; g<=2; g++) {
955
+ //if (intercepts[g] & !num_slopes[g] & factors[g].num_levels>100) {
956
+ if (intercepts[g] & !num_slopes[g]) {
957
+ i_prune[i++] = g // increments at the end
958
+ if (i > 2) { // success!
959
+ prune = 1
960
+ break
961
+ }
962
+ }
963
+ }
964
+
965
+ if (!prune) return
966
+
967
+ // for speed, the factor with more levels goes first
968
+ i = i_prune[1]
969
+ j = i_prune[2]
970
+ //if (factors[i].num_levels < factors[j].num_levels) swap(i, j) // bugbug uncomment it!
971
+ prune_g1 = i
972
+ prune_g2 = j
973
+
974
+ bg = BipartiteGraph()
975
+ bg.init(&factors[prune_g1], &factors[prune_g2], verbose)
976
+ (void) bg.init_zigzag(1) // 1 => save subgraphs into bg.subgraph_id
977
+ bg.compute_cores()
978
+ bg.prune_1core(weight)
979
+ num_pruned = bg.N_drop
980
+ }
981
+
982
+ // bugbug fix or remove this fn altogether
983
+ `Void' FixedEffects::_expand_1core(`Variables' y)
984
+ {
985
+ y = bg.expand_1core(y)
986
+ }
987
+
988
+
989
+ `Void' FixedEffects::save_touse(| `Varname' touse, `Boolean' replace)
990
+ {
991
+ `Integer' idx
992
+ `Vector' mask
993
+
994
+ // Set default arguments
995
+ if (args()<1 | touse=="") {
996
+ assert(tousevar != "")
997
+ touse = tousevar
998
+ }
999
+ // Note that args()==0 implies replace==1 (else how would you find the name)
1000
+ if (args()==0) replace = 1
1001
+ if (args()==1 | replace==.) replace = 0
1002
+
1003
+ if (verbose > 0) printf("\n{txt}## Saving e(sample)\n")
1004
+
1005
+ // Compute dummy vector
1006
+ mask = create_mask(st_nobs(), 0, sample, 1)
1007
+
1008
+ // Save vector as variable
1009
+ if (replace) {
1010
+ st_store(., touse, mask)
1011
+ }
1012
+ else {
1013
+ idx = st_addvar("byte", touse, 1)
1014
+ st_store(., idx, mask)
1015
+ }
1016
+ }
1017
+
1018
+
1019
+ `Void' FixedEffects::save_variable(`Varname' varname,
1020
+ `Variables' data,
1021
+ | `Varlist' varlabel)
1022
+ {
1023
+ `RowVector' idx
1024
+ `Integer' i
1025
+ idx = st_addvar("double", tokens(varname))
1026
+ st_store(sample, idx, data)
1027
+ if (args()>=3 & varlabel!="") {
1028
+ for (i=1; i<=cols(data); i++) {
1029
+ st_varlabel(idx[i], varlabel[i])
1030
+ }
1031
+ }
1032
+
1033
+ }
1034
+
1035
+
1036
+
1037
+ `Void' FixedEffects::post_footnote()
1038
+ {
1039
+ `Matrix' table
1040
+ `StringVector' rowstripe
1041
+ `StringRowVector' colstripe
1042
+ `String' text
1043
+
1044
+ assert(!missing(G))
1045
+ st_numscalar("e(N_hdfe)", G)
1046
+ st_numscalar("e(N_hdfe_extended)", G_extended)
1047
+ st_numscalar("e(df_a)", df_a)
1048
+ st_numscalar("e(df_a_initial)", df_a_initial)
1049
+ st_numscalar("e(df_a_redundant)", df_a_redundant)
1050
+ st_numscalar("e(df_a_nested)", df_a_nested)
1051
+ st_global("e(dofmethod)", invtokens(dofadjustments))
1052
+
1053
+ if (absvars == "") {
1054
+ absvars = extended_absvars = "_cons"
1055
+ }
1056
+
1057
+ st_global("e(absvars)", invtokens(absvars))
1058
+ text = invtokens(extended_absvars)
1059
+ text = subinstr(text, "1.", "")
1060
+ st_global("e(extended_absvars)", text)
1061
+
1062
+ // Absorbed degrees-of-freedom table
1063
+ table = (doflist_K \ doflist_M \ (doflist_K-doflist_M) \ !doflist_M_is_exact \ doflist_M_is_nested)'
1064
+ rowstripe = extended_absvars'
1065
+ rowstripe = J(rows(table), 1, "") , extended_absvars' // add equation col
1066
+ colstripe = "Categories" \ "Redundant" \ "Num Coefs" \ "Exact?" \ "Nested?" // colstripe cannot have dots on Stata 12 or earlier
1067
+ colstripe = J(cols(table), 1, "") , colstripe // add equation col
1068
+ st_matrix("e(dof_table)", table)
1069
+ st_matrixrowstripe("e(dof_table)", rowstripe)
1070
+ st_matrixcolstripe("e(dof_table)", colstripe)
1071
+
1072
+ st_numscalar("e(ic)", iteration_count)
1073
+ st_numscalar("e(drop_singletons)", drop_singletons)
1074
+ st_numscalar("e(num_singletons)", num_singletons)
1075
+ st_numscalar("e(N_full)", st_numscalar("e(N)") + num_singletons)
1076
+
1077
+ // Prune specific
1078
+ if (prune==1) {
1079
+ st_numscalar("e(pruned)", 1)
1080
+ st_numscalar("e(num_pruned)", num_pruned)
1081
+ }
1082
+
1083
+ if (!missing(finite_condition)) st_numscalar("e(finite_condition)", finite_condition)
1084
+ }
1085
+
1086
+
1087
+ `Void' FixedEffects::post()
1088
+ {
1089
+ `String' text
1090
+ `Integer' i
1091
+
1092
+ post_footnote()
1093
+
1094
+ // ---- constants -------------------------------------------------------
1095
+
1096
+ st_global("e(predict)", "reghdfe_p")
1097
+ st_global("e(estat_cmd)", "reghdfe_estat")
1098
+ st_global("e(footnote)", "reghdfe_footnote")
1099
+ //st_global("e(marginsok)", "")
1100
+ st_global("e(marginsnotok)", "Residuals SCore")
1101
+ st_numscalar("e(df_m)", df_m)
1102
+
1103
+
1104
+ assert(title != "")
1105
+ text = sprintf("HDFE %s", title)
1106
+ st_global("e(title)", text)
1107
+
1108
+ text = sprintf("Absorbing %g HDFE %s", G, plural(G, "group"))
1109
+ st_global("e(title2)", text)
1110
+
1111
+ st_global("e(model)", model)
1112
+ st_global("e(cmdline)", cmdline)
1113
+
1114
+ st_numscalar("e(tss)", tss)
1115
+ st_numscalar("e(tss_within)", tss_within)
1116
+ st_numscalar("e(rss)", rss)
1117
+ st_numscalar("e(mss)", tss - rss)
1118
+ st_numscalar("e(rmse)", rmse)
1119
+ st_numscalar("e(F)", F)
1120
+
1121
+ st_numscalar("e(ll)", ll)
1122
+ st_numscalar("e(ll_0)", ll_0)
1123
+
1124
+ st_numscalar("e(r2)", r2)
1125
+ st_numscalar("e(r2_within)", r2_within)
1126
+ st_numscalar("e(r2_a)", r2_a)
1127
+ st_numscalar("e(r2_a_within)", r2_a_within)
1128
+
1129
+ if (!missing(N_clust)) {
1130
+ st_numscalar("e(N_clust)", N_clust)
1131
+ for (i=1; i<=num_clusters; i++) {
1132
+ text = sprintf("e(N_clust%g)", i)
1133
+ st_numscalar(text, N_clust_list[i])
1134
+ }
1135
+ text = "Statistics robust to heteroskedasticity"
1136
+ st_global("e(title3)", text)
1137
+ }
1138
+
1139
+ if (!missing(sumweights)) st_numscalar("e(sumweights)", sumweights)
1140
+
1141
+ st_numscalar("e(report_constant)", compute_constant & report_constant)
1142
+
1143
+
1144
+ // ---- .options properties ---------------------------------------------
1145
+
1146
+ st_global("e(depvar)", depvar)
1147
+ st_global("e(indepvars)", invtokens(indepvars))
1148
+
1149
+ if (!missing(N_clust)) {
1150
+ st_numscalar("e(N_clustervars)", num_clusters)
1151
+ st_global("e(clustvar)", invtokens(clustervars))
1152
+ for (i=1; i<=num_clusters; i++) {
1153
+ text = sprintf("e(clustvar%g)", i)
1154
+ st_global(text, clustervars[i])
1155
+ }
1156
+ }
1157
+
1158
+ if (residuals != "") {
1159
+ st_global("e(resid)", residuals)
1160
+ }
1161
+
1162
+ // Stata uses e(vcetype) for the SE column headers
1163
+ // In the default option, leave it empty.
1164
+ // In the cluster and robust options, set it as "Robust"
1165
+ text = strproper(vcetype)
1166
+ if (text=="Cluster") text = "Robust"
1167
+ if (text=="Unadjusted") text = ""
1168
+ assert(anyof( ("", "Robust", "Jackknife", "Bootstrap") , text))
1169
+ if (text!="") st_global("e(vcetype)", text)
1170
+
1171
+ text = vcetype
1172
+ if (text=="unadjusted") text = "ols"
1173
+ st_global("e(vce)", text)
1174
+
1175
+ // Weights
1176
+ if (weight_type != "") {
1177
+ st_global("e(wexp)", "= " + weight_var)
1178
+ st_global("e(wtype)", weight_type)
1179
+ }
1180
+ }
1181
+
1182
+
1183
+ // --------------------------------------------------------------------------
1184
+ // Recreate HDFE object
1185
+ // --------------------------------------------------------------------------
1186
+ `FixedEffects' FixedEffects::reload(`Boolean' copy)
1187
+ {
1188
+ `FixedEffects' ans
1189
+ assert(copy==0 | copy==1)
1190
+
1191
+ // Trim down current object as much as possible
1192
+ // this. is optional but useful for clarity
1193
+ if (copy==0) {
1194
+ this.factors = Factor()
1195
+ this.sample = .
1196
+ this.bg = BipartiteGraph()
1197
+ this.pruned_weight = .
1198
+ this.rre_varname = .
1199
+ this.rre_true_residual = .
1200
+ }
1201
+
1202
+ // Initialize new object
1203
+ ans = fixed_effects(this.absorb, this.tousevar, this.weight_type, this.weight_var, this.drop_singletons, this.verbose)
1204
+
1205
+ // Fill out new object with values of current one
1206
+ ans.depvar = this.depvar
1207
+ ans.indepvars = this.indepvars
1208
+ ans.varlist = this.varlist
1209
+ ans.model = this.model
1210
+ ans.vcetype = this.vcetype
1211
+ ans.num_clusters = this.num_clusters
1212
+ ans.clustervars = this.clustervars
1213
+ ans.base_clustervars = this.base_clustervars
1214
+ ans.vceextra = this.vceextra
1215
+ ans.summarize_stats = this.summarize_stats
1216
+ ans.summarize_quietly = this.summarize_quietly
1217
+ ans.notes = this.notes
1218
+ ans.store_sample = this.store_sample
1219
+ ans.timeit = this.timeit
1220
+ ans.compact = this.compact
1221
+ ans.poolsize = this.poolsize
1222
+ ans.diopts = this.diopts
1223
+
1224
+ ans.fullindepvars = this.fullindepvars
1225
+ ans.not_basevar = this.not_basevar
1226
+
1227
+ ans.compute_constant = this.compute_constant
1228
+ ans.report_constant = this.report_constant
1229
+ ans.tolerance = this.tolerance
1230
+ ans.save_any_fe = this.save_any_fe
1231
+
1232
+ ans.slope_method = this.slope_method
1233
+ ans.maxiter = this.maxiter
1234
+ ans.transform = this.transform
1235
+ ans.acceleration = this.acceleration
1236
+ ans.accel_start = this.accel_start
1237
+ ans.conlim = this.conlim
1238
+ ans.btol = this.btol
1239
+ ans.min_ok = this.min_ok
1240
+ ans.prune = this.prune
1241
+ ans.always_run_lsmr_preconditioner = this.always_run_lsmr_preconditioner
1242
+
1243
+ return(ans)
1244
+ }
1245
+
1246
+
1247
+ // --------------------------------------------------------------------------
1248
+ // Estimate finite condition number of the graph Laplacian
1249
+ // --------------------------------------------------------------------------
1250
+ `Void' FixedEffects::estimate_cond()
1251
+ {
1252
+ `Matrix' D1, D2, L
1253
+ `Vector' lambda
1254
+ `RowVector' tmp
1255
+ `Factor' F12
1256
+
1257
+ if (finite_condition!=-1) return
1258
+
1259
+ if (verbose > 0) printf("\n{txt}## Computing finite condition number of the Laplacian\n\n")
1260
+
1261
+ if (verbose > 0) printf("{txt} - Constructing vectors of levels\n")
1262
+ F12 = join_factors(factors[1], factors[2], ., ., 1)
1263
+
1264
+ // Non-sparse (lots of memory usage!)
1265
+ if (verbose > 0) printf("{txt} - Constructing design matrices\n")
1266
+ D1 = designmatrix(F12.keys[., 1])
1267
+ D2 = designmatrix(F12.keys[., 2])
1268
+ assert_msg(rows(D1)<=2000, "System is too big!")
1269
+ assert_msg(rows(D2)<=2000, "System is too big!")
1270
+
1271
+ if (verbose > 0) printf("{txt} - Constructing graph Laplacian\n")
1272
+ L = D1' * D1 , - D1' * D2 \
1273
+ - D2' * D1 , D2' * D2
1274
+ if (verbose > 0) printf("{txt} - L is %g x %g \n", rows(L), rows(L))
1275
+
1276
+ if (verbose > 0) printf("{txt} - Computing eigenvalues\n")
1277
+ assert_msg(rows(L)<=2000, "System is too big!")
1278
+ eigensystem(L, ., lambda=.)
1279
+ lambda = Re(lambda')
1280
+
1281
+ if (verbose > 0) printf("{txt} - Selecting positive eigenvalues\n")
1282
+ lambda = edittozerotol(lambda, 1e-8)
1283
+ tmp = select(lambda, edittozero(lambda, 1))
1284
+ tmp = minmax(tmp)
1285
+ finite_condition = tmp[2] / tmp[1]
1286
+
1287
+ if (verbose > 0) printf("{txt} - Finite condition number: {res}%s{txt}\n", strofreal(finite_condition))
1288
+ }
1289
+
1290
+
1291
+ `Real' FixedEffects::lsmr_norm(`Matrix' x)
1292
+ {
1293
+ assert(cols(x)==1 | rows(x)==1)
1294
+ // BUGBUG: what if we have a corner case where there are as many obs as params?
1295
+ if (has_weights & cols(x)==1 & rows(x)==rows(weight)) {
1296
+ return(sqrt(quadcross(x, weight, x)))
1297
+ }
1298
+ else if (cols(x)==1) {
1299
+ return(sqrt(quadcross(x, x)))
1300
+ }
1301
+ else {
1302
+ return(sqrt(quadcross(x', x')))
1303
+ }
1304
+ }
1305
+
1306
+
1307
+ // Ax: given the coefs 'x', return the projection 'Ax'
1308
+ `Vector' FixedEffects::lsmr_A_mult(`Vector' x)
1309
+ {
1310
+ `Integer' g, k, idx_start, idx_end, i
1311
+ `Vector' ans
1312
+ `FactorPointer' pf
1313
+
1314
+ ans = J(N, 1, 0)
1315
+ idx_start = 1
1316
+
1317
+ for (g=1; g<=G; g++) {
1318
+ pf = &(factors[g])
1319
+ k = (*pf).num_levels
1320
+
1321
+ if (intercepts[g]) {
1322
+ idx_end = idx_start + k - 1
1323
+ ans = ans + (x[|idx_start, 1 \ idx_end , 1 |] :* asarray((*pf).extra, "precond_intercept") )[(*pf).levels, .]
1324
+ idx_start = idx_end + 1
1325
+ }
1326
+
1327
+ if (num_slopes[g]) {
1328
+ for (i=1; i<=num_slopes[g]; i++) {
1329
+ idx_end = idx_start + k - 1
1330
+ ans = ans + x[|idx_start, 1 \ idx_end , 1 |][(*pf).levels] :* asarray((*pf).extra, "precond_slopes")
1331
+ idx_start = idx_end + 1
1332
+ }
1333
+ }
1334
+
1335
+ }
1336
+ //assert(!missing(ans))
1337
+ return(ans)
1338
+ }
1339
+
1340
+
1341
+ // A'x: Compute the FEs and store them in a big stacked vector
1342
+ `Vector' FixedEffects::lsmr_At_mult(`Vector' x)
1343
+ {
1344
+ `Integer' m, g, i, idx_start, idx_end, k
1345
+ `Vector' ans
1346
+ `FactorPointer' pf
1347
+ `Vector' alphas
1348
+ `Matrix' tmp_alphas
1349
+
1350
+ alphas = J(M, 1, .)
1351
+ idx_start = 1
1352
+
1353
+ for (g=1; g<=G; g++) {
1354
+ pf = &(factors[g])
1355
+ k = (*pf).num_levels
1356
+
1357
+ if (intercepts[g]) {
1358
+ idx_end = idx_start + k - 1
1359
+ if (has_weights) {
1360
+ alphas[| idx_start , 1 \ idx_end , 1 |] = `panelsum'((*pf).sort(x :* weight), (*pf).info) :* asarray((*pf).extra, "precond_intercept")
1361
+ }
1362
+ else {
1363
+ alphas[| idx_start , 1 \ idx_end , 1 |] = `panelsum'((*pf).sort(x), (*pf).info) :* asarray((*pf).extra, "precond_intercept")
1364
+ }
1365
+ idx_start = idx_end + 1
1366
+ }
1367
+
1368
+ if (num_slopes[g]) {
1369
+ idx_end = idx_start + k * num_slopes[g] - 1
1370
+ if (has_weights) {
1371
+ tmp_alphas = `panelsum'((*pf).sort(x :* weight :* asarray((*pf).extra, "precond_slopes")), (*pf).info)
1372
+ }
1373
+ else {
1374
+ tmp_alphas = `panelsum'((*pf).sort(x :* asarray((*pf).extra, "precond_slopes")), (*pf).info)
1375
+ }
1376
+ alphas[| idx_start , 1 \ idx_end , 1 |] = vec(tmp_alphas)
1377
+ idx_start = idx_end + 1
1378
+ }
1379
+ }
1380
+ //assert(!missing(alphas))
1381
+ return(alphas)
1382
+ }
1383
+
1384
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_common.mata ADDED
@@ -0,0 +1,838 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Common functions ---------------------------------------------------------
2
+ mata:
3
+
4
+ // --------------------------------------------------------------------------
5
+ // BUGBUG: not sure if this is still used...
6
+ // --------------------------------------------------------------------------
7
+ `StringRowVector' clean_tokens(`String' vars)
8
+ {
9
+ `StringRowVector' ans
10
+ `Integer' i
11
+ ans = tokens(vars)
12
+ for (i=1; i<=cols(ans); i++) {
13
+ ans[i] = invtokens(tokens(ans[i]))
14
+ }
15
+ return(ans)
16
+ }
17
+
18
+
19
+ // --------------------------------------------------------------------------
20
+ // Workaround to st_data's odd behavior
21
+ // --------------------------------------------------------------------------
22
+ // This does three things:
23
+ // 1) Wrap up interactions in parens (up to four) to avoid Stata's quirk/bug
24
+ // 2) If issue persists, load columns one-by-one
25
+ // 1) Instead of returning it reuses existing matrices (might use less mem?)
26
+ //
27
+ // Example of the issue:
28
+ // sysuse auto, clear
29
+ // mata: cols(st_data(., "1.rep78 2.rep78 3.rep78#1.foreign")) // expected 3, got 6
30
+ // Happens b/c st_data doesn't work variable by variable but expands the interactions
31
+ // We can partly fix it by surrounding interactions with parens
32
+ // But st_data() only supports up to 4 parens
33
+
34
+
35
+ `Void' _st_data_wrapper(`Variables' index, `StringRowVector' vars, `Variables' data, `Boolean' verbose)
36
+ {
37
+ `RowVector' is_interaction
38
+ `StringRowVector' fixed_vars
39
+ `Integer' i, k
40
+
41
+ vars = tokens(invtokens(vars))
42
+
43
+ // Add parenthesis only for Stata 11-14, as on Stata 15+ they are i) not needed and ii) corrupt output
44
+ // For i) see "help set fvtrack"
45
+ // For ii) see "test/stdata3.do" on Github
46
+ if (st_numscalar("c(stata_version)") < 15) {
47
+ is_interaction = strpos(vars, "#") :> 0
48
+ is_interaction = is_interaction :& (runningsum(is_interaction) :<= 4) // Only up to 4 parenthesis supported
49
+ fixed_vars = subinstr(strofreal(is_interaction), "0", "")
50
+ fixed_vars = subinstr(fixed_vars, "1", "(") :+ vars :+ subinstr(fixed_vars, "1", ")")
51
+ }
52
+ else {
53
+ fixed_vars = vars
54
+ }
55
+
56
+ // Override code above, to minimize any risk of incorrect data
57
+ // Since this is an undocumented feature, it might or might not work on some older versions of Stata
58
+ // (See also email from [email protected])
59
+ fixed_vars = vars
60
+
61
+ data = st_data(index, fixed_vars)
62
+ k = cols(vars)
63
+
64
+ if (cols(data) > k) {
65
+ if (verbose > 0) printf("{err}(some empty columns were added due to a bug/quirk in {bf:st_data()}; %g cols created instead of %g for {it:%s}; running slower workaround)\n", cols(data), k, invtokens(vars))
66
+ data = J(rows(data), 0, .)
67
+ for (i=1; i<=k; i++) {
68
+ data = data, st_data(index, vars[i])
69
+ }
70
+ }
71
+ assert(cols(data)==k)
72
+ }
73
+
74
+
75
+ // --------------------------------------------------------------------------
76
+ // Each col of A will have stdev of 1 unless stdev is quite close to 0
77
+ // --------------------------------------------------------------------------
78
+ `RowVector' function reghdfe_standardize(`Matrix' A)
79
+ {
80
+ `RowVector' stdevs, means
81
+ `Integer' K, N // i,
82
+
83
+ // We don't need to good accuracy for the stdevs, so we have a few alternatives:
84
+ // Note: cross(1,A) is the same as colsum(A), but faster
85
+ // Note: cross(A, A) is very fast, but we only need the main diagonals
86
+ // [A: 1sec] stdevs = sqrt( (colsum(A:*A) - (cross(1, A) :^ 2 / N)) / (N-1) )
87
+ // [B: .61s] stdevs = sqrt( (diagonal(cross(A, A))' - (cross(1, A) :^ 2 / N)) / (N-1) )
88
+ // [C: .80s] stdevs = diagonal(sqrt(variance(A)))'
89
+ // [D: .67s] means = cross(1, A) / N; stdevs = sqrt(diagonal(crossdev(A, means, A, means))' / (N-1))
90
+
91
+ assert_msg(!isfleeting(A), "input cannot be fleeting")
92
+ N = rows(A)
93
+ K = cols(A)
94
+
95
+ stdevs = J(1, K, .)
96
+
97
+ // (A) Very precise
98
+
99
+ // (B) Precise
100
+ // means = cross(1, A) / N
101
+ // stdevs = sqrt(diagonal(quadcrossdev(A, means, A, means))' / (N-1))
102
+
103
+ // (C) 20% faster; don't use it if you care about accuracy
104
+ stdevs = sqrt( (diagonal(cross(A, A))' - (cross(1, A) :^ 2 / N)) / (N-1) )
105
+ assert_msg(!missing(stdevs), "stdevs are missing; is N==1?") // Shouldn't happen as we don't expect N==1
106
+ stdevs = colmax(( stdevs \ J(1, K, 1e-3) ))
107
+ A = A :/ stdevs
108
+
109
+ // (D) Equilibrate matrix columns instead of standardize (i.e. just divide by column max)
110
+ // _perhapsequilc(A, stdevs=.)
111
+ // stdevs = 1 :/ stdevs
112
+ // assert_msg(!missing(stdevs), "stdevs are missing; is N==1?")
113
+
114
+ // (E) Don't do anything
115
+ // stdevs = J(1, cols(A), 1)
116
+
117
+ return(stdevs)
118
+ }
119
+
120
+
121
+ // --------------------------------------------------------------------------
122
+ // Divide two row vectors but adjust the denominator if it's too small
123
+ // --------------------------------------------------------------------------
124
+ `RowVector' safe_divide(`RowVector' numerator, `RowVector' denominator, | `Real' epsi) {
125
+ // If the numerator goes below machine precision, we lose accuracy
126
+ // If the denominator goes below machine precision, the division explodes
127
+ if (args()<3 | epsi==.) epsi = epsilon(1)
128
+ return( numerator :/ colmax(denominator \ J(1,cols(denominator),epsi)) )
129
+ }
130
+
131
+
132
+ // If X is not square...
133
+ // `Matrix' R
134
+ // real colvector tau, p
135
+
136
+ // _hqrdp(A, tau, R, p=.)
137
+ // B = hqrdmultq1t(A, tau, B)
138
+ // rank = _solveupper(R, B, tol)
139
+ // B = B[invorder(p),.]
140
+ // +- +-
141
+
142
+ // invsym(makesymmetric(..))
143
+
144
+
145
+
146
+
147
+
148
+ // --------------------------------------------------------------------------
149
+ // Robust solver for Ax=b
150
+ // --------------------------------------------------------------------------
151
+ // Mata utility for sequential use of solvers
152
+ // Default is cholesky;
153
+ // if that fails, use QR;
154
+ // if overridden, use QR.
155
+ // Author: Schaffer, Mark E <[email protected]>
156
+ // --------------------------------------------------------------------------
157
+ // Warning:
158
+ // cholqrsolve calls qrsolve which calls _qrsolve which calls ...
159
+ // Does all the indirection makes it too slow to use within a panel?
160
+ // --------------------------------------------------------------------------
161
+ `Matrix' function reghdfe_cholqrsolve(`Matrix' A,
162
+ `Matrix' B,
163
+ | `Boolean' useqr)
164
+ {
165
+ `Matrix' C
166
+ if (args()<3 | useqr==.) useqr = 0
167
+
168
+ if (!useqr) {
169
+ C = cholsolve(A, B)
170
+ if (hasmissing(C)) useqr = 1
171
+ }
172
+
173
+ if (useqr) {
174
+ C = qrsolve(A, B)
175
+ }
176
+
177
+ return(C)
178
+ }
179
+
180
+
181
+ // --------------------------------------------------------------------------
182
+ // OLS Regression
183
+ // --------------------------------------------------------------------------
184
+ `Void' function reghdfe_post_ols(`FixedEffects' S,
185
+ `Variables' X,
186
+ `String' bname,
187
+ `String' Vname,
188
+ `String' nname,
189
+ `String' rname,
190
+ `String' dfrname)
191
+ {
192
+ `Integer' N
193
+ `Integer' rank
194
+ `Integer' df_r
195
+ `Vector' b
196
+ `Matrix' V
197
+ `Variable' resid
198
+ `Real' eps
199
+ `Integer' i
200
+ `RowVector' kept
201
+ `Vector' not_basevar
202
+
203
+
204
+ `Vector' idx
205
+ `Vector' temp_b
206
+ `Matrix' temp_V
207
+ `Integer' k
208
+
209
+ if (S.timeit) timer_on(90)
210
+ reghdfe_solve_ols(S, X, b=., V=., N=., rank=., df_r=., resid=., kept=., "vce_small")
211
+ assert(cols(X) - 1 == rows(b) - S.compute_constant) // The 1st column of X is actually Y
212
+ assert((rows(b) == rows(V)) & (rows(b) == cols(V)))
213
+ if (S.timeit) timer_off(90)
214
+
215
+ // Add base vars
216
+ if (S.compute_constant) {
217
+ if (S.verbose > 1) printf("\n{txt}## Adding _cons to varlist\n")
218
+ assert_msg(rows(S.not_basevar) == 1, "rows(S.not_basevar) == 1")
219
+ S.not_basevar = S.not_basevar, 1
220
+ S.fullindepvars = S.fullindepvars + " _cons"
221
+ S.indepvars = S.indepvars + " _cons"
222
+ }
223
+ if (S.not_basevar != J(1, 0, .)) {
224
+ if (S.verbose > 1) printf("\n{txt}## Adding base variables to varlist\n")
225
+ k = cols(S.not_basevar)
226
+ assert_msg(cols(S.not_basevar) == k, "cols(S.not_basevar) == k")
227
+ idx = `selectindex'(S.not_basevar)
228
+ swap(b, temp_b)
229
+ swap(V, temp_V)
230
+ b = J(k, 1, 0)
231
+ V = J(k, k, 0)
232
+ b[idx, 1] = temp_b
233
+ V[idx, idx] = temp_V
234
+ }
235
+
236
+ st_matrix(bname, b')
237
+
238
+ if (S.verbose > 1) printf("\n{txt}## Reporting omitted variables\n")
239
+ // Add "o." prefix to omitted regressors
240
+ eps = sqrt(epsilon(1))
241
+ for (i=1; i<=rows(b); i++) {
242
+ if (b[i]==0 & S.not_basevar[i] & S.verbose > -1) {
243
+ printf("{txt}note: %s omitted because of collinearity\n", tokens(S.fullindepvars)[i])
244
+ //stata(sprintf("_ms_put_omit %s", indepvars[i]))
245
+ //indepvars[i] = st_global("s(ospec)")
246
+ // This is now one in reghdfe.ado with -_ms_findomitted-
247
+ }
248
+ }
249
+
250
+ st_matrix(Vname, V)
251
+ st_numscalar(nname, N)
252
+ st_numscalar(rname, rank)
253
+ st_numscalar(dfrname, df_r)
254
+
255
+ // Need to save resids if saving FEs, even if temporarily
256
+ if (S.residuals == "" & S.save_any_fe) {
257
+ S.residuals = "__temp_reghdfe_resid__"
258
+ }
259
+
260
+ if (S.residuals != "") {
261
+ if (S.verbose > 0) printf("\n{txt}## Storing residuals in {res}%s{txt}\n\n", S.residuals)
262
+ if (S.compact == 1) {
263
+ S.residuals_vector = resid
264
+ }
265
+ else {
266
+ S.save_variable(S.residuals, resid, "Residuals")
267
+ }
268
+ }
269
+ }
270
+
271
+
272
+ `Void' function reghdfe_solve_ols(`FixedEffects' S,
273
+ `Variables' X,
274
+ `Vector' b,
275
+ `Matrix' V,
276
+ `Integer' N,
277
+ `Integer' rank,
278
+ `Integer' df_r,
279
+ `Vector' resid,
280
+ `RowVector' kept,
281
+ `String' vce_mode,
282
+ | `Variable' true_w)
283
+ {
284
+ // Hack: the first col of X is actually y!
285
+ `Integer' K, KK, tmp_N
286
+ `Matrix' xx, inv_xx, W, inv_V, just_X
287
+ `Vector' w
288
+ `Integer' used_df_r
289
+ `Integer' dof_adj
290
+
291
+ `Boolean' is_standardized
292
+ `Real' stdev_y
293
+ `RowVector' stdev_x
294
+
295
+ if (true_w == . | args() < 11) true_w = J(0, 1, .)
296
+ if (S.vcetype == "unadjusted" & S.weight_type=="pweight") S.vcetype = "robust"
297
+ if (S.verbose > 0) printf("\n{txt}## Solving least-squares regression of partialled-out variables\n\n")
298
+ assert_in(vce_mode, ("vce_none", "vce_small", "vce_asymptotic"))
299
+
300
+ is_standardized = S.all_stdevs != J(1, 0, .)
301
+ if (is_standardized) S.means = S.means :/ S.all_stdevs
302
+
303
+ // Weight FAQ:
304
+ // - fweight: obs. i represents w[i] duplicate obs. (there is no loss of info wrt to having the "full" dataset)
305
+ // - aweight: obs. i represents w[i] distinct obs. that were mean-collapsed (so there is loss of info and hetero)
306
+ // soln: normalize them so they sum to N (the true number of obs in our sample), and then treat them as fweight
307
+ // - pweight: each obs. represents only one obs. from the pop, that was drawn from w[i] individuals
308
+ // we want to make inference on the population, so if we interviewed 100% of the men and only 10% of women,
309
+ // then without weighting we would be over-representing men, which leads to a loss of efficiency +-+-
310
+ // it is the same as aweight + robust
311
+ // We need to pick N and w
312
+ N = rows(X) // Default; will change with fweights
313
+ S.sumweights = S.weight_type != "" ? quadsum(S.weight) : N
314
+ assert(rows(S.means) == 1)
315
+ assert(cols(S.means) == cols(X))
316
+
317
+ w = 1
318
+ if (rows(true_w)) {
319
+ // Custom case for IRLS (ppmlhdfe) where S.weight = mu * true_w
320
+ assert_msg(S.weight_type == "aweight")
321
+ N = sum(true_w)
322
+ w = S.weight * sum(true_w) / sum(S.weight)
323
+ }
324
+ else if (S.weight_type=="fweight") {
325
+ N = S.sumweights
326
+ w = S.weight
327
+ }
328
+ else if (S.weight_type=="aweight" | S.weight_type=="pweight") {
329
+ w = S.weight * (N / S.sumweights)
330
+ }
331
+
332
+ // Build core matrices
333
+ if (S.timeit) timer_on(91)
334
+
335
+ K = cols(X) - 1
336
+ xx = quadcross(X, w, X)
337
+ S.tss_within = xx[1,1]
338
+ xx = K ? xx[| 2 , 2 \ K+1 , K+1 |] : J(0, 0, .)
339
+ if (S.timeit) timer_off(91)
340
+
341
+ // This matrix indicates what regressors are not collinear
342
+ assert_msg(cols(S.kept)==K+1, "partial_out() was run with a different set of vars")
343
+
344
+ // Bread of the robust VCV matrix
345
+ // Compute this early so we can update the list of collinear regressors
346
+ if (S.timeit) timer_on(95)
347
+ assert_msg( cols(tokens(invtokens(S.indepvars)))==cols(xx) , "HDFE.indepvars is missing or has the wrong number of columns")
348
+ inv_xx = reghdfe_rmcoll(tokens(invtokens(S.indepvars)), xx, kept) // this modifies -kept-
349
+
350
+ // // Workaround for case with extremely high weights, where ivnsym loses precision and incorrectly excludes vars
351
+ // if (S.has_weights) {
352
+ // if (max(S.weight) > 1e5) {
353
+ // kept = (1..K)
354
+ // }
355
+ // }
356
+
357
+ S.df_m = rank = K - diag0cnt(inv_xx)
358
+ KK = S.df_a + S.df_m
359
+ S.df_r = N - KK // replaced when clustering
360
+ if (S.timeit) timer_off(95)
361
+
362
+ // Compute betas
363
+ // - There are two main options
364
+ // a) Use cholqrsolve on xx and xy. Faster but numerically inaccurate
365
+ // See: http://www.stata.com/statalist/archive/2012-02/msg00956.html
366
+ // b) Use qrsolve. More accurate but doesn't handle weights easily
367
+ // - Ended up doing (b) with a hack for weights
368
+ b = J(K, 1, 0)
369
+ if (cols(kept)) {
370
+ if (S.has_weights) {
371
+ b[kept] = qrsolve(X[., 1:+kept] :* sqrt(S.weight), X[., 1] :* sqrt(S.weight))
372
+ }
373
+ else {
374
+ b[kept] = qrsolve(X[., 1:+kept], X[., 1])
375
+ }
376
+ }
377
+
378
+ if (S.timeit) timer_on(92)
379
+ if (!isfleeting(resid) | vce_mode != "vce_none") resid = X * (1 \ -b) // y - X * b
380
+ if (S.timeit) timer_off(92)
381
+
382
+ if (S.compute_constant) {
383
+ tmp_N = (S.weight_type=="aweight" | S.weight_type=="pweight") ? N : S.sumweights
384
+ if (rows(true_w)) tmp_N = N
385
+ reghdfe_extend_b_and_inv_xx(S.means, tmp_N, b, inv_xx)
386
+ }
387
+
388
+ // Stop if no VCE/R2/RSS needed
389
+ if (vce_mode == "vce_none") {
390
+ assert(!is_standardized)
391
+ return
392
+ }
393
+
394
+ if (S.timeit) timer_on(93)
395
+ if (S.vcetype != "unadjusted") {
396
+ if (S.compute_constant) {
397
+ if (isfleeting(X)) {
398
+ // Save some memory... unsure if it helps
399
+ swap(just_X, X)
400
+ just_X = K ? just_X[., 2..K+1] :+ S.means[2..cols(S.means)] : J(rows(just_X), 0, .)
401
+ }
402
+ else {
403
+ just_X = K ? X[., 2..K+1] :+ S.means[2..cols(S.means)] : J(rows(X), 0, .)
404
+ }
405
+ }
406
+ else {
407
+ just_X = K ? X[., 2..K+1] : J(rows(X), 0, .)
408
+ }
409
+ }
410
+ if (S.timeit) timer_off(93)
411
+
412
+ if (S.timeit) timer_on(94)
413
+ S.rss = quadcross(resid, w, resid) // do before reghdfe_robust() modifies w
414
+ if (S.timeit) timer_off(94)
415
+
416
+ // Compute full VCE
417
+ if (S.timeit) timer_on(96)
418
+ assert_msg(anyof( ("unadjusted", "robust", "cluster") , S.vcetype), "invalid vcetype" + S.vcetype)
419
+ if (S.vcetype == "unadjusted") {
420
+ if (S.verbose > 0) {
421
+ printf("{txt} - Small-sample-adjustment: q = N / (N-df_m-df_a) = %g / (%g - %g - %g) = %g\n", N, N, rank, S.df_a, N / S.df_r )
422
+ }
423
+ dof_adj = N / S.df_r
424
+ if (vce_mode == "vce_asymptotic") dof_adj = N / (N-1) // 1.0
425
+ V = (S.rss / N) * dof_adj * inv_xx
426
+ }
427
+ else if (S.vcetype == "robust") {
428
+ V = reghdfe_robust(S, just_X, inv_xx, resid, w, N, KK, vce_mode, true_w)
429
+ }
430
+ else {
431
+ V = reghdfe_cluster(S, just_X, inv_xx, resid, w, N, KK, vce_mode)
432
+ }
433
+ if (S.timeit) timer_off(96)
434
+
435
+ // Wald test: joint significance
436
+ if (S.timeit) timer_on(97)
437
+ inv_V = invsym(V[kept, kept]) // this might not be of full rank but numerical inaccuracies hide it
438
+ if (diag0cnt(inv_V)) {
439
+ if (S.verbose > -1) printf("{txt}warning: missing F statistic; dropped variables due to collinearity or too few clusters\n")
440
+ W = .
441
+ }
442
+ else if (length(b[kept])==0) {
443
+ W = .
444
+ }
445
+ else {
446
+ // We could probably do this with the simpler formula instead of Wald
447
+ W = b[kept]' * inv_V * b[kept] / S.df_m
448
+ if (missing(W) & S.verbose > -1) printf("{txt}warning: missing F statistic\n")
449
+ }
450
+ if (S.timeit) timer_off(97)
451
+
452
+ // V can be missing if b is completely absorbed by the FEs
453
+ if (missing(V)) {
454
+ if (S.verbose > 0) printf("{txt} - VCE has missing values, setting it to zeroes (are your regressors all collinear?)\n")
455
+ V = J(rows(V), rows(V), 0)
456
+ }
457
+
458
+ // Undo standardization
459
+ if (is_standardized) {
460
+ // Sanity checks
461
+ assert(rows(S.all_stdevs)==1)
462
+ assert(cols(S.all_stdevs) - 1 == rows(b) - S.compute_constant) // Subtract "y" on left; subtract "_cons" on right
463
+
464
+ // Recover stdevs
465
+ stdev_y = S.all_stdevs[1]
466
+ stdev_x = K ? S.all_stdevs[2..cols(S.all_stdevs)] : J(1, 0, .)
467
+ if (S.compute_constant) stdev_x = stdev_x, 1
468
+ stdev_x = stdev_x :/ stdev_y
469
+
470
+ // Transform output (note that S.tss is already ok)
471
+ S.rss = S.rss * stdev_y ^ 2
472
+ S.tss_within = S.tss_within * stdev_y ^ 2
473
+ resid = resid * stdev_y
474
+ V = V :/ (stdev_x' * stdev_x)
475
+ b = b :/ stdev_x'
476
+ }
477
+
478
+ // Results
479
+ S.title = "Linear regression"
480
+ // S.model = "ols"
481
+ used_df_r = N - KK - S.df_a_nested
482
+ S.r2 = 1 - S.rss / S.tss
483
+ S.r2_a = 1 - (S.rss / used_df_r) / (S.tss / (N - S.has_intercept ) )
484
+ S.r2_within = 1 - S.rss / S.tss_within
485
+ S.r2_a_within = 1 - (S.rss / used_df_r) / (S.tss_within / (used_df_r + rank))
486
+
487
+ S.ll = - 0.5 * N * (1 + ln(2 * pi()) + ln(S.rss / N))
488
+ S.ll_0 = - 0.5 * N * (1 + ln(2 * pi()) + ln(S.tss_within / N))
489
+
490
+ S.rmse = sqrt(S.rss / used_df_r)
491
+ if (used_df_r==0) S.rmse = sqrt(S.rss)
492
+ S.F = W
493
+ df_r = S.df_r // reghdfe_cluster might have updated it (this gets returned to the caller function)
494
+ }
495
+
496
+
497
+ // --------------------------------------------------------------------------
498
+ // Robust VCE
499
+ // --------------------------------------------------------------------------
500
+ // Advice: Delegate complicated regressions to -avar- and specialized routines
501
+ // BUGBUG: do we standardize X again? so V is well behaved?
502
+ // Notes:
503
+ // - robust is the same as cluster robust where cluster==_n
504
+ // - cluster just "collapses" X_i * e_i for each group, and builds M from that
505
+
506
+ `Matrix' reghdfe_robust(`FixedEffects' S,
507
+ `Variables' X,
508
+ `Matrix' D,
509
+ `Variable' resid,
510
+ `Variable' w,
511
+ `Integer' N,
512
+ `Integer' K,
513
+ `String' vce_mode,
514
+ `Variable' true_w)
515
+ {
516
+ `Matrix' M, V
517
+ `Integer' dof_adj
518
+
519
+ if (S.verbose > 0) printf("\n{txt}## Estimating Robust Variance-Covariance Matrix of the Estimators (VCE)\n\n")
520
+ if (S.verbose > 0) printf("{txt} - VCE type: {res}%s{txt}\n", S.vcetype)
521
+ if (S.verbose > 0) printf("{txt} - Weight type: {res}%s{txt}\n", S.weight_type=="" ? "<none>" : S.weight_type)
522
+
523
+ if (rows(true_w)) {
524
+ assert(S.weight_type=="aweight")
525
+ w = (resid :* w) :^ 2 :/ true_w // resid^2 * aw^2 * fw
526
+ }
527
+ else if (S.weight_type=="") {
528
+ w = resid :^ 2
529
+ }
530
+ else if (S.weight_type=="fweight") {
531
+ w = resid :^ 2 :* w
532
+ }
533
+ else if (S.weight_type=="aweight" | S.weight_type=="pweight") {
534
+ w = (resid :* w) :^ 2
535
+ }
536
+
537
+ dof_adj = N / (N - K)
538
+ if (vce_mode == "vce_asymptotic") dof_adj = N / (N-1) // 1.0
539
+ M = S.compute_constant ? quadcross(X, 1, w, X, 1) : quadcross(X, w, X)
540
+ if (S.verbose > 0) {
541
+ printf("{txt} - Small-sample-adjustment: q = N / (N-df_m-df_a) = %g / (%g - %g - %g) = %g\n", N, N, K-S.df_a, S.df_a, N / (N-K) )
542
+ }
543
+ V = D * M * D * dof_adj
544
+ return(V)
545
+ }
546
+
547
+ `Matrix' reghdfe_cluster(`FixedEffects' S,
548
+ `Variables' X,
549
+ `Matrix' D,
550
+ `Variable' resid,
551
+ `Variable' w,
552
+ `Integer' N,
553
+ `Integer' K,
554
+ `String' vce_mode)
555
+ {
556
+ `Matrix' M, V
557
+ `Integer' dof_adj, N_clust, df_r, nested_adj
558
+ `Integer' Q, q, g, sign, i, j
559
+ pointer(`Factor') rowvector FPlist
560
+ `FactorPointer' FP
561
+ `Varlist' vars
562
+ `String' var, var_with_spaces
563
+ `Boolean' clustervar_is_absvar, required_fix
564
+ `Matrix' tuples
565
+ `RowVector' tuple
566
+ `RowVector' N_clust_list
567
+ `Matrix' joined_levels
568
+ `Integer' Msize
569
+
570
+ w = resid :* w
571
+ Msize = cols(X) + S.compute_constant
572
+
573
+ vars = S.clustervars
574
+ Q = cols(vars)
575
+ if (S.verbose > 0) printf("\n{txt}## Estimating Cluster Robust Variance-Covariance Matrix of the Estimators (VCE)\n\n")
576
+ if (S.verbose > 0) printf("{txt} - VCE type: {res}%s{txt} (%g-way clustering)\n", S.vcetype, Q)
577
+ if (S.verbose > 0) printf("{txt} - Cluster variables: {res}%s{txt}\n", invtokens(vars))
578
+ if (S.verbose > 0) printf("{txt} - Weight type: {res}%s{txt}\n", S.weight_type=="" ? "<none>" : S.weight_type)
579
+ assert_msg(0 < Q & Q < 10)
580
+
581
+ // Get or build factors associated with the clustervars
582
+ FPlist = J(1, Q, NULL)
583
+ N_clust_list = J(1, Q, .)
584
+ for (q=1; q<=Q; q++) {
585
+ var = vars[q]
586
+ clustervar_is_absvar = 0
587
+ for (g=1; g<=S.G; g++) {
588
+ if (invtokens(S.factors[g].varlist, "#") == var) {
589
+ clustervar_is_absvar = 1
590
+ FP = &(S.factors[g])
591
+ break
592
+ }
593
+ }
594
+ var_with_spaces = subinstr(var, "#", " ")
595
+ if (!clustervar_is_absvar) FP = &(factor(var_with_spaces, S.sample, ., "", ., ., ., 0))
596
+ N_clust_list[q] = (*FP).num_levels
597
+ if (S.verbose > 0) printf("{txt} - {res}%s{txt} has {res}%g{txt} levels\n", var, N_clust_list[q])
598
+ FPlist[q] = FP
599
+ }
600
+
601
+ // Build the meat part of the V matrix
602
+ if (S.verbose > 0) printf("{txt} - Computing the 'meat' of the VCE\n")
603
+ M = J(Msize, Msize, 0)
604
+ tuples = .
605
+ for (q=1; q<=Q; q++) {
606
+ tuples = reghdfe_choose_n_k(Q, q, tuples)
607
+ sign = mod(q, 2) ? 1 : -1 // + with odd number of variables, - with even
608
+ for (j=1; j<=rows(tuples); j++) {
609
+ tuple = tuples[j, .]
610
+ if (S.verbose > 0) printf("{txt} - Level %g/%g; sublevel %g/%g; M = M %s ClusterVCE(%s)\n", q, Q, j, rows(tuples), sign > 0 ? "+" : "-" , invtokens(strofreal(tuple)))
611
+ if (q==1) {
612
+ assert(tuple==j)
613
+ FP = FPlist[j]
614
+ }
615
+ else if (q==2) {
616
+ FP = &join_factors( *FPlist[tuple[1]] , *FPlist[tuple[2]] , ., ., 1)
617
+ }
618
+ else {
619
+ joined_levels = (*FPlist[tuple[1]]).levels
620
+ for (i=2; i<=cols(tuple); i++) {
621
+ joined_levels = joined_levels, (*FPlist[tuple[i]]).levels
622
+ }
623
+ FP = &_factor(joined_levels, ., ., "", ., ., ., 0)
624
+ }
625
+ M = M + sign * reghdfe_vce_cluster_meat(FP, X, w, Msize, S.compute_constant)
626
+ }
627
+ }
628
+
629
+ // Build VCE
630
+ N_clust = min(N_clust_list)
631
+ nested_adj = (S.df_a==0) // minor adj. so we match xtreg when the absvar is nested within cluster
632
+ // (when ..nested.., df_a is zero so we divide N-1 by something that can potentially be N (!))
633
+ // so we either add the 1 back, or change the numerator (and the N_clust-1 factor!)
634
+ dof_adj = (N - 1) / (N - nested_adj - K) * N_clust / (N_clust - 1) // adjust for more than 1 cluster
635
+ if (vce_mode == "vce_asymptotic") dof_adj = N_clust / (N_clust - 1) // 1.0
636
+ if (S.verbose > 0) {
637
+ printf("{txt} - Small-sample-adjustment: q = (%g - 1) / (%g - %g) * %g / (%g - 1) = %g\n", N, N, K+nested_adj, N_clust, N_clust, dof_adj)
638
+ }
639
+ V = D * M * D * dof_adj
640
+ if (Q > 1) {
641
+ required_fix = reghdfe_fix_psd(V)
642
+ if (required_fix) printf("{txt}Warning: VCV matrix was non-positive semi-definite; adjustment from Cameron, Gelbach & Miller applied.\n")
643
+ }
644
+
645
+ // Store e()
646
+ assert(!missing(S.df_r))
647
+ df_r = N_clust - 1
648
+ if (S.df_r > df_r) {
649
+ S.df_r = df_r
650
+ }
651
+ else if (S.verbose > 0) {
652
+ printf("{txt} - Unclustered df_r (N - df_m - df_a = %g) are {it:lower} than clustered df_r (N_clust-1 = %g)\n", S.df_r, df_r)
653
+ printf("{txt} Thus, we set e(df_r) as the former.\n")
654
+ printf("{txt} This breaks consistency with areg but ensures internal consistency\n")
655
+ printf("{txt} between vce(robust) and vce(cluster _n)\n")
656
+ }
657
+
658
+ S.N_clust = N_clust
659
+ S.N_clust_list = N_clust_list
660
+
661
+ return(V)
662
+ }
663
+
664
+
665
// --------------------------------------------------------------------------
// Meat of the cluster-robust VCE for a single clustering variable:
//   M = sum_c (Z_c' e_c)(Z_c' e_c)'
// where Z_c stacks the rows of X belonging to cluster c (augmented with a
// constant column when -compute_constant- is set) and e_c the matching
// residuals. Msize is the dimension of the returned (square) matrix.
// --------------------------------------------------------------------------
`Matrix' reghdfe_vce_cluster_meat(`FactorPointer' FP,
                                  `Variables' X,
                                  `Variable' resid,
                                  `Integer' Msize,
                                  `Boolean' compute_constant)
{
	`Integer'				i, N_clust
	`Variables'				X_sorted
	`Variable'				resid_sorted
	`Matrix'				X_tmp
	`Vector'				resid_tmp
	`RowVector'				Xe_tmp
	`Matrix'				M

	// Degenerate case: no regressors and no constant -> nothing to accumulate
	if (cols(X)==0 & !compute_constant) return(J(0,0,0))

	N_clust = (*FP).num_levels
	(*FP).panelsetup() // ensure (*FP).info holds the per-cluster panel boundaries
	// Sort obs. by cluster level so each cluster is a contiguous panel
	X_sorted = (*FP).sort(X)
	resid_sorted = (*FP).sort(resid)
	M = J(Msize, Msize, 0)

	if (cols(X)) {
		// Accumulate the outer product of the per-cluster scores
		for (i=1; i<=N_clust; i++) {
			X_tmp = panelsubmatrix(X_sorted, i, (*FP).info)
			resid_tmp = panelsubmatrix(resid_sorted, i, (*FP).info)
			Xe_tmp = quadcross(1, 0, resid_tmp, X_tmp, compute_constant) // Faster than colsum(e_tmp :* X_tmp)
			M = M + quadcross(Xe_tmp, Xe_tmp)
		}
	}
	else {
		// Workaround for when there are no Xs except for _cons
		assert(compute_constant)
		for (i=1; i<=N_clust; i++) {
			resid_tmp = panelsubmatrix(resid_sorted, i, (*FP).info)
			M = M + quadsum(resid_tmp) ^ 2 // scalar meat: (sum of cluster residuals)^2
		}
	}

	return(M)
}
706
+
707
+
708
// Enumerate all combinations of K integers chosen from 1..N
// Kronecker-product approach based on njc's tuples.ado
// -prev_ans- must contain, one per row, all comb(n, k-1) combinations of
// size k-1 (with strictly increasing entries); it is ignored when k==1.
`Matrix' reghdfe_choose_n_k(`Integer' n, `Integer' k, `Matrix' prev_ans)
{
	`RowVector'				base
	`Integer'				n_prev
	`Matrix'				pool

	// Base case: the 1-combinations are simply the integers 1..n
	base = 1::n
	if (k==1) return(base)

	// Sanity-check the previous round of combinations
	n_prev = rows(prev_ans)
	assert(n_prev==comb(n, k-1))
	assert(cols(prev_ans)==k-1)

	// Prepend every value 1..n to every (k-1)-combination ...
	pool = (base # J(n_prev, 1, 1)) , J(n, 1, prev_ans)
	// ... and keep only the strictly increasing rows (true combinations)
	return(select(pool, pool[., 1] :< pool[., 2]))
}
727
+
728
+
729
// --------------------------------------------------------------------------
// Fix non-positive VCV
// --------------------------------------------------------------------------
// If the VCV matrix is not positive-semidefinite, apply the fix from
// Cameron, Gelbach & Miller - Robust Inference with Multi-way Clustering (JBES 2011):
// 1) Eigendecompose V = U diag(lambda) U'
// 2) Replace negative eigenvalues with zero
// 3) Rebuild V from the truncated spectrum
// V is modified in place; returns 1 when the fix was applied, 0 otherwise.
`Boolean' function reghdfe_fix_psd(`Matrix' V) {
	`Matrix' evecs
	`Matrix' evals
	`Boolean' had_negative

	// symeigensystem() needs a symmetric input; force symmetry, abort if impossible
	if (!issymmetric(V)) _makesymmetric(V)
	if (!issymmetric(V)) exit(error(505))

	symeigensystem(V, evecs=., evals=.)
	had_negative = (min(evals) < 0)
	if (had_negative) {
		evals = evals :* (evals :>= 0)		// truncate negative eigenvalues at zero
		V = quadcross(evecs', evals, evecs')	// V = evecs * diag(evals) * evecs'
	}
	return(had_negative)
}
756
+
757
+
758
// --------------------------------------------------------------------------
// Remove collinear variables
// --------------------------------------------------------------------------
// Based on ivreg2's s_rmcoll2
// Inverts the cross-product matrix -xx-, detecting collinear (dropped)
// columns through the zeroed diagonal that invsym() leaves behind.
// Side effects in Stata: r(k_omitted), r(varlist) (kept vars), r(omitted).
// -kept- is filled with the indices of the retained columns (output arg).
// Returns the generalized inverse of xx.
`Matrix' reghdfe_rmcoll(`Varlist' varnames,
                        `Matrix' xx,
                        `RowVector' kept)
{
	`Integer'				K, num_dropped
	`Matrix'				inv_xx, smat, alt_inv_xx
	`RowVector'				vl_drop, vl_keep

	assert(rows(xx)==cols(xx))
	K = cols(xx)
	inv_xx = K ? invsym(xx, 1..K) : J(0, 0, .)

	// Specifying the sweep order in invsym() can lead to incorrectly dropped regressors
	// (EG: with very VERY high weights)
	// We'll double check in this case
	num_dropped = diag0cnt(inv_xx)
	if (K & num_dropped) {
		alt_inv_xx = invsym(xx) // retry without an explicit sweep order
		if (num_dropped != diag0cnt(alt_inv_xx)) {
			inv_xx = alt_inv_xx
			num_dropped = diag0cnt(alt_inv_xx)
		}
	}

	st_numscalar("r(k_omitted)", num_dropped)
	smat = (diagonal(inv_xx) :== 0)' // indicator row vector: 1 = omitted column
	vl_drop = select(varnames, smat)
	vl_keep = select(varnames, !smat)
	if (cols(vl_keep)) st_global("r(varlist)", invtokens(vl_keep))
	if (cols(vl_drop)) st_global("r(omitted)", invtokens(vl_drop))
	kept = `selectindex'(!smat) // Return it, so we can exclude these variables from the joint Wald test
	return(inv_xx)
}
795
+
796
+
797
// --------------------------------------------------------------------------
// Use regression-through-mean and block partition formula to enlarge b and inv(XX)
// --------------------------------------------------------------------------
// Appends the recovered constant term to -b- (as the last coefficient) and
// expands -inv_xx- by one row/column; both arguments are modified in place.
// -means- holds (mean(y), mean(x1), ..., mean(xK)); -N- is the number of
// observations (or the sum of weights -- see note below).
`Void' reghdfe_extend_b_and_inv_xx(
	`RowVector' means,
	`Integer' N,
	`Vector' b,
	`Matrix' inv_xx)
{
	// How to add back _cons:
	// 1) To recover coefficient, apply "regression through means formula":
	//	  b0 = mean(y) - mean(x) * b1

	// 2) To recover variance ("full_inv_xx")
	//	  apply formula for inverse of partitioned symmetric matrix
	//	  http://fourier.eng.hmc.edu/e161/lectures/gaussianprocess/node6.html
	//	  http://www.cs.nthu.edu.tw/~jang/book/addenda/matinv/matinv/
	//
	//	  Given A = [X'X X'1]     B = [B11 B21']    B = inv(A)
	//	            [1'X 1'1]         [B21 B22 ]
	//
	//	  B11 is just inv(xx) (because of Frisch-Waugh)
	//	  B21 ("side") = means * B11
	//	  B22 ("corner") = 1 / sumweights * (1 - side * means')
	//
	// - Note that means is NOT A12, but A12/N or A12 / (sum_weights)

	// - Note: aw and pw (and unweighted) use normal weights,
	//	 but for fweights we expected S.sumweights

	`RowVector'				means_x, side
	`Real'					corner

	// means_x = regressor means only (first entry of -means- is mean(y))
	means_x = cols(means) > 1 ? means[2..cols(means)] : J(1, 0, .)
	b = b \ means[1] - means_x * b // means * (1 \ -b)
	corner = (1 / N) + means_x * inv_xx * means_x'
	side = - means_x * inv_xx
	inv_xx = (inv_xx , side' \ side , corner)

}
837
+
838
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_constructor.mata ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // --------------------------------------------------------------------------
2
+ // FixedEffects constructor (also precomputes factors)
3
+ // --------------------------------------------------------------------------
4
+
5
+ mata:
6
+
7
// --------------------------------------------------------------------------
// FixedEffects constructor: parses absvars through -reghdfe_parse-, builds
// one Factor per absorbed dimension, iteratively drops singleton obs.,
// loads slope variables and weights, and returns the initialized object.
// Optional args default to: touse="" (fresh tempname), no weights,
// drop_singletons=1, verbose=0.
// BUGFIX: the post-singleton sanity check used assignment (=) instead of
// equality (==) inside assert(); see marked line below.
// --------------------------------------------------------------------------
`FixedEffects' fixed_effects(`Varlist' absvars,
                           | `Varname' touse,
                             `String' weighttype,
                             `Varname' weightvar,
                             `Boolean' drop_singletons,
                             `Boolean' verbose)
{
	`FixedEffects'			S
	`Varname'				absvar, cvars
	`Integer'				i, j, g, gg, remaining
	`Vector'				idx
	`Integer'				spaces
	`Integer'				num_singletons_i
	`Variables'				cvar_data
	`FactorPointer'			pf

	// Set default value of arguments
	if (args()<2) touse = ""
	if (args()<3) weighttype = ""
	if (args()<4) weightvar = ""
	if (args()<5 | drop_singletons==.) drop_singletons = 1
	if (args()<6 | verbose==.) verbose = 0

	S = FixedEffects()
	S.verbose = verbose
	S.drop_singletons = drop_singletons

	// Parse absvars
	if (S.verbose > 0) printf("\n{txt}## Parsing absvars and HDFE options\n")

	if (touse == "") touse = st_tempname()
	st_global("reghdfe_touse", touse)
	stata(`"reghdfe_parse "' + absvars)
	S.sample = `selectindex'(st_data(., touse))
	S.tousevar = touse // useful if later on we want to clone the HDFE object
	st_global("reghdfe_touse", "")

	// Options parsed by -reghdfe_parse- may override the constructor args
	if (st_global("s(residuals)") != "") S.residuals = st_global("s(residuals)")
	if (st_global("s(verbose)")!="") S.verbose = verbose = strtoreal(st_global("s(verbose)"))
	if (st_global("s(drop_singletons)")!="") S.drop_singletons = drop_singletons = strtoreal(st_global("s(drop_singletons)"))
	assert(S.verbose < .)
	assert(S.drop_singletons==0 | S.drop_singletons==1)

	if (S.verbose > 0) stata("sreturn list")
	S.G = strtoreal(st_global("s(G)"))
	S.absorb = absvars // useful if later on we want to clone the HDFE object
	S.absvars = tokens(st_global("s(absvars)"))
	S.has_intercept = strtoreal(st_global("s(has_intercept)"))
	S.save_any_fe = strtoreal(st_global("s(save_any_fe)"))
	S.save_all_fe = strtoreal(st_global("s(save_all_fe)"))
	S.ivars = tokens(st_global("s(ivars)"))
	S.cvars = tokens(st_global("s(cvars)"))
	S.targets = strtrim(tokens(st_global("s(targets)")))
	S.intercepts = strtoreal(tokens(st_global("s(intercepts)")))
	S.num_slopes = strtoreal(tokens(st_global("s(num_slopes)")))
	S.save_fe = S.targets :!= ""
	S.report_constant = strtoreal(st_global("s(report_constant)"))
	S.always_run_lsmr_preconditioner = strtoreal(st_global("s(precondition)"))

	// Ensure that S.report_constant and S.has_intercept are 0/1
	assert(anyof((0,1), S.has_intercept))
	assert(anyof((0,1), S.report_constant))
	S.compute_constant = S.has_intercept & S.report_constant

	if (st_global("s(tolerance)") != "") S.tolerance = strtoreal(st_global("s(tolerance)"))
	if (st_global("s(maxiter)") != "") S.maxiter = strtoreal(st_global("s(maxiter)"))
	if (st_global("s(prune)") != "") S.prune = strtoreal(st_global("s(prune)"))
	if (st_global("s(transform)") != "") S.transform = st_global("s(transform)")
	if (st_global("s(acceleration)") != "") S.acceleration = st_global("s(acceleration)")

	// Override LSMR if G=1
	if (S.G==1 & S.acceleration=="lsmr") S.acceleration = "conjugate_gradient"

	S.dofadjustments = tokens(st_global("s(dofadjustments)"))
	S.groupvar = st_global("s(groupvar)")
	if (st_global("s(finite_condition)")=="1") S.finite_condition = -1 // signal to compute it
	S.compute_rre = (st_global("s(compute_rre)")=="1")
	if (S.compute_rre) S.rre_varname = st_global("s(rre)")

	S.poolsize = strtoreal(st_global("s(poolsize)"))

	if (S.verbose > -1 & !S.has_intercept) printf("{txt}(warning: no intercepts terms in absorb(); regression lacks constant term)\n")

	S.extended_absvars = tokens(st_global("s(extended_absvars)"))
	S.tss = .

	// Sanity checks: one entry per absorbed dimension in every parallel array
	assert(1<=S.G)
	if (S.G>10) printf("{txt}(warning: absorbing %2.0f dimensions of fixed effects; check that you really want that)\n", S.G)
	assert(S.G == cols(S.ivars))
	assert(S.G == cols(S.cvars))
	assert(S.G == cols(S.targets))
	assert(S.G == cols(S.intercepts))
	assert(S.G == cols(S.num_slopes))

	// Fill out object
	S.G = cols(S.absvars)
	S.factors = Factor(S.G)

	assert_msg(anyof(("", "fweight", "pweight", "aweight", "iweight"), weighttype), "wrong weight type")
	S.weight_type = weighttype
	S.weight_var = weightvar

	S.num_singletons = 0
	if (drop_singletons) {
		num_singletons_i = 0
		if (weighttype=="fweight" | weighttype=="iweight") {
			S.weight = st_data(S.sample, weightvar) // just to use it in F.drop_singletons()
		}
	}


	// (1) create the factors and remove singletons
	// Keep cycling through the G dimensions until a full pass drops nothing
	remaining = S.G
	i = 0
	if (S.verbose > 0) {
		printf("\n{txt}## Initializing Mata object for %g fixed effects\n\n", S.G)
		spaces = max((0, max(strlen(S.absvars))-4))
		printf("{txt} {c TLC}{hline 4}{c TT}{hline 3}{c TT}{hline 1}%s{hline 6}{c TT}{hline 6}{c TT}{hline 9}{c TT}{hline 11}{c TT}{hline 12}{c TT}{hline 9}{c TT}{hline 14}{c TRC}\n", "{hline 1}" * spaces)
		printf("{txt} {c |} i {c |} g {c |} %s Name {c |} Int? {c |} #Slopes {c |} Obs. {c |} Levels {c |} Sorted? {c |} #Drop Singl. {c |}\n", " " * spaces)
		printf("{txt} {c LT}{hline 4}{c +}{hline 3}{c +}{hline 1}%s{hline 6}{c +}{hline 6}{c +}{hline 9}{c +}{hline 11}{c +}{hline 12}{c +}{hline 9}{c +}{hline 14}{c RT}\n", "{hline 1}" * spaces)
		displayflush()
	}

	while (remaining) {
		++i
		g = 1 + mod(i-1, S.G)
		absvar = S.absvars[g]

		if (S.verbose > 0) {
			printf("{txt} {c |} %2.0f {c |} %1.0f {c |} {res}%s{txt} {c |} ", i, g, (spaces+5-strlen(absvar)) * " " + absvar)
			printf("{txt}{%s}%3s{txt} {c |} %1.0f {c |}", S.intercepts[g] ? "txt" : "err", S.intercepts[g] ? "Yes" : "No", S.num_slopes[g])
			displayflush()
		}

		if (S.verbose > 0) {
			printf("{res}%10.0g{txt} {c |}", rows(S.sample))
			displayflush()
		}

		if (rows(S.sample) < 2) {
			if (S.verbose > 0) printf("\n")
			exit(error(2001))
		}

		if (i<=S.G) {
			if (S.ivars[g] == "_cons" & S.G == 1) {
				// Special case without any fixed effects

				S.factors[g] = Factor()
				pf = &(S.factors[g])
				(*pf).num_obs = (*pf).counts = rows(S.sample)
				(*pf).num_levels = 1
				//(*pf).levels = . // Not filled to save space
				(*pf).levels = J(rows(S.sample), 1, 1)
				(*pf).is_sorted = 1
				(*pf).method = "none"

				// The code below is equivalent but 3x slower
				// S.factors[g] = _factor(J(rows(S.sample),1,1), 1, ., "hash0", ., 1, ., 0)
			}
			else {
				// We don't need to save keys (or sort levels but that might change estimates of FEs)
				S.factors[g] = factor(S.ivars[g], S.sample, ., "", ., 1, ., 0)
			}
		}

		if (S.verbose > 0) {
			printf(" {res}%10.0g{txt} {c |} %7s {c |}", S.factors[g].num_levels, S.factors[g].is_sorted ? "Yes" : "No")
			displayflush()
		}

		if (drop_singletons) {

			if (weighttype=="fweight") {
				idx = S.factors[g].drop_singletons(S.weight)
			}
			else if (weighttype=="iweight") {
				idx = S.factors[g].drop_singletons(S.weight, 1) // zero_threshold==1
			}
			else {
				idx = S.factors[g].drop_singletons()
			}

			num_singletons_i = rows(idx)
			S.num_singletons = S.num_singletons + num_singletons_i
			if (S.verbose > 0) {
				printf(" %10.0g {c |}", num_singletons_i)
				displayflush()
			}

			if (num_singletons_i==0) {
				--remaining
			}
			else {
				// Dropping obs. here may create new singletons in the other
				// dimensions, so restart the countdown
				remaining = S.G - 1

				// sample[idx] = . // not allowed in Mata; instead, make 0 and then select()
				S.sample[idx] = J(rows(idx), 1, 0)
				S.sample = select(S.sample, S.sample)

				for (j=i-1; j>=max((1, i-remaining)); j--) {
					gg = 1 + mod(j-1, S.G)
					S.factors[gg].drop_obs(idx)
					if (S.verbose > 0) printf("{res} .")
				}
			}
		}
		else {
			if (S.verbose > 0) printf(" n/a {c |}")
			--remaining
		}
		if (S.verbose > 0) printf("\n")
	}
	if (S.verbose > 0) {
		printf("{txt} {c BLC}{hline 4}{c BT}{hline 3}{c BT}{hline 1}%s{hline 6}{c BT}{hline 6}{c BT}{hline 9}{c BT}{hline 11}{c BT}{hline 12}{c BT}{hline 9}{c BT}{hline 14}{c BRC}\n", "{hline 1}" * spaces)
	}

	if ( drop_singletons & S.num_singletons>0 & S.verbose>-1 | S.factors[1].num_obs<2) {
		if (weighttype=="iweight") {
			// PPML-specific
			printf(`"{txt}(dropped %s observations that are either {browse "http://scorreia.com/research/singletons.pdf":singletons} or {browse "http://scorreia.com/research/separation.pdf":separated} by a fixed effect)\n"', strofreal(S.num_singletons))
		}
		else {
			printf(`"{txt}(dropped %s {browse "http://scorreia.com/research/singletons.pdf":singleton observations})\n"', strofreal(S.num_singletons))
		}
	}

	if (S.factors[1].num_obs < 2) {
		exit(error(2001))
	}

	S.N = S.factors[1].num_obs // store number of obs.
	// BUGFIX: was -assert(S.N = S.factors[S.G].num_obs)-, i.e. an assignment
	// instead of the intended equality comparison
	assert(S.N == S.factors[S.G].num_obs)
	assert(S.N > 1)


	// (2) run *.panelsetup() after the sample is defined
	if (S.verbose > 0) printf("\n{txt}## Initializing panelsetup() for each fixed effect\n\n")
	for (g=1; g<=S.G; g++) {
		absvar = S.absvars[g]
		if (S.verbose > 0) printf("{txt} - panelsetup({res}%s{txt})\n", absvar)
		S.factors[g].panelsetup()
	}

	// (3) load cvars
	if (sum(S.num_slopes)) {
		if (S.verbose > 0) printf("\n{txt}## Loading slope variables\n\n")
		for (g=1; g<=S.G; g++) {
			cvars = tokens(S.cvars[g])
			if (S.num_slopes[g]) {
				// Load, standardize, sort by factor and store
				// Don't precompute aux objects (xmeans, inv_xx) as they depend on the weights
				// and will be computed on step (5)
				if (S.verbose > 0) printf("{txt} - cvars({res}%s{txt})\n", invtokens(cvars))
				pf = &(S.factors[g])
				cvar_data = (*pf).sort(st_data(S.sample, cvars))
				asarray((*pf).extra, "x_stdevs", reghdfe_standardize(cvar_data))
				asarray((*pf).extra, "x", cvar_data)
			}
		}
		cvar_data = . // free memory
	}

	// (4) prune edges of degree-1
	// S.prune = 0 // bugbug
	if (S.prune) S.prune_1core()

	// (5) load weight
	S.load_weights(weighttype, weightvar, J(0,1,.), 1) // update S.has_weights, S.factors, etc.

	// Save "true" residuals for RRE
	if (S.compute_rre) {
		assert_msg(S.rre_varname != "")
		S.rre_true_residual = st_data(S.sample, S.rre_varname)
	}

	return(S)
}
285
+
286
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_estat.ado ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* estat subcommands after reghdfe/ppmlhdfe: -summarize-, -vce-, and -ic-
program reghdfe_estat, rclass
	version `=cond(c(version)<14, c(version), 13)'
	* Only valid right after a reghdfe/ppmlhdfe estimation
	if !inlist("`e(cmd)'", "reghdfe", "ppmlhdfe") {
		error 301
	}

	* Split off the subcommand; the remainder stays in `0'
	gettoken key 0 : 0, parse(", ")
	local lkey = length(`"`key'"')

	* Allow abbreviations of -summarize- down to -su-
	if `"`key'"' == substr("summarize",1,max(2,`lkey')) {

		* NOTE(review): `rest' is never defined in this program, so this line
		* clears `0' (including any user-typed varlist/options) before -syntax-
		* runs; confirm against upstream whether `0' was meant instead.
		local 0 `rest'
		syntax [anything] , [*] [noheader] // -noheader- gets silently ignored b/c it will always be -on-

		**if ("`anything'"=="") {
		**	* By default include the instruments
		**	local anything // `e(depvar)' `e(indepvars)' `e(endogvars)' `e(instruments)'
		**}

		* Need to use -noheader- as a workaround to the bug in -estat_summ-
		estat_summ `anything' , `options' noheader

	}
	else if `"`key'"' == "vce" {
		vce `0'
	}
	else if `"`key'"' == "ic" {
		* df = number of model parameters incl. the constant
		syntax, [*]
		estat_default ic, df(`=e(df_m)+1') `options'
	}
	else {
		di as error `"invalid subcommand `key'"'
		exit 321
	}
	return add // ?
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_footnote.ado ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// -------------------------------------------------------------
// Display Regression Footnote
// -------------------------------------------------------------
// Prints the "Absorbed degrees of freedom" table from e(dof_table):
// one row per absorbed FE with its categories, redundant count, and
// number of estimated coefficients, plus footnote markers.
program reghdfe_footnote
syntax [, width(int 13)]

* Nothing to report when no fixed effects were absorbed
if (`"`e(absvars)'"' == "_cons") {
	exit
}

tempname table
matrix `table' = e(dof_table)
* Widen the first column if any FE name is longer than the default width
mata: st_local("var_width", strofreal(max(strlen(st_matrixrowstripe("`table'")[., 2]))))
if (`var_width' > `width') loc width = `var_width'
loc rows = rowsof("`table'")
loc cols = colsof("`table'") // BUGFIX: was rowsof()
local vars : rownames `table'

// Setup table
di as text _n "Absorbed degrees of freedom:"
tempname mytab
.`mytab' = ._tab.new, col(5) lmargin(0)
.`mytab'.width `width' | 12 12 14 1 |
.`mytab'.pad . 1 1 1 0
.`mytab'.numfmt . %9.0g %9.0g %9.0g .
.`mytab'.numcolor . text text result .
.`mytab'.sep, top

local explain_exact 0
local explain_nested 0

// Header
.`mytab'.titles "Absorbed FE" "Categories" " - Redundant" " = Num. Coefs" ""
.`mytab'.sep, middle

// Body
forval i = 1/`rows' {
	local var : word `i' of `vars'
	loc var = subinstr("`var'", "1.", "", .)
	loc note " "
	* col 4 flags an inexact redundant-parameter count
	if (`=`table'[`i', 4]'==1) {
		loc note "?"
		loc explain_exact 1
	}
	* col 5 flags a FE nested within the cluster variable
	if (`=`table'[`i', 5]'==1) {
		loc note "*"
		loc explain_nested 1
	}

	// noabsorb
	if (`rows'==1 & `=`table'[`i', 1]'==1 & strpos("`var'", "__")==1) loc var "_cons"

	.`mytab'.row "`var'" `=`table'[`i', 1]' `=`table'[`i', 2]' `=`table'[`i', 3]' "`note'"
}

// Bottom
.`mytab'.sep, bottom
if (`explain_exact') di as text "? = number of redundant parameters may be higher"
if (`explain_nested') di as text `"* = FE nested within cluster; treated as redundant for DoF computation"'
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_header.ado ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* (Modified from _coef_table_header.ado)
* Builds and prints the two-column header above the coefficient table:
* the right column holds N, F/chi2 tests and R-squared stats, while the
* left column holds the title lines and cluster counts.

program reghdfe_header
	if !c(noisily) exit

	* `left' and `right' are class-system arrays of display directives
	tempname left right
	.`left' = {}
	.`right' = {}

	local width 78
	local colwidths 1 30 51 67
	local i 0
	foreach c of local colwidths {
		local ++i
		local c`i' `c'
		local C`i' _col(`c')
	}

	local c2wfmt 10
	local c4wfmt 10
	local max_len_title = `c3' - 2
	local c4wfmt1 = `c4wfmt' + 1
	local title `"`e(title)'"'
	local title2 `"`e(title2)'"'
	local title3 `"`e(title3)'"'
	local title4 `"`e(title4)'"'
	local title5 `"`e(title5)'"'

	// Right hand header ************************************************

	*N obs
	.`right'.Arrpush `C3' "Number of obs" `C4' "= " as res %`c4wfmt'.0fc e(N)

	* Ftest
	if `"`e(chi2)'"' != "" | "`e(df_r)'" == "" {
		Chi2test `right' `C3' `C4' `c4wfmt'
	}
	else {
		Ftest `right' `C3' `C4' `c4wfmt'
	}

	* display R-squared
	if !missing(e(r2)) {
		.`right'.Arrpush `C3' "R-squared" `C4' "= " as res %`c4wfmt'.4f e(r2)
	}
	*if !missing(e(r2_p)) {
	*	.`right'.Arrpush `C3' "Pseudo R2" `C4' "= " as res %`c4wfmt'.4f e(r2_p)
	*}
	if !missing(e(r2_a)) {
		.`right'.Arrpush `C3' "Adj R-squared" `C4' "= " as res %`c4wfmt'.4f e(r2_a)
	}
	if !missing(e(r2_within)) {
		.`right'.Arrpush `C3' "Within R-sq." `C4' "= " as res %`c4wfmt'.4f e(r2_within)
	}
	if !missing(e(rmse)) {
		.`right'.Arrpush `C3' "Root MSE" `C4' "= " as res %`c4wfmt'.4f e(rmse)
	}

	// Left hand header *************************************************

	* make title line part of the header if it fits
	local len_title : length local title
	forv i=2/5 {
		if (`"`title`i''"'!="") {
			local len_title = max(`len_title',`:length local title`i'')
		}
	}

	* When the titles fit, move them into the left column and clear the
	* locals so HeaderDisplay does not print them a second time
	if `len_title' < `max_len_title' {
		.`left'.Arrpush `"`"`title'"'"'
		local title
		forv i=2/5 {
			if `"`title`i''"' != "" {
				.`left'.Arrpush `"`"`title`i''"'"'
				local title`i'
			}
		}
		.`left'.Arrpush "" // Empty
	}

	* Clusters
	local kr = `.`right'.arrnels' // number of elements in the right header
	local kl = `.`left'.arrnels' // number of elements in the left header
	local N_clustervars = e(N_clustervars)
	if (`N_clustervars'==.) local N_clustervars 0
	* Pad the left column so cluster counts align with the bottom rows
	local space = `kr' - `kl' - `N_clustervars'
	local clustvar = e(clustvar)
	forv i=1/`space' {
		.`left'.Arrpush ""
	}
	forval i = 1/`N_clustervars' {
		gettoken cluster clustvar : clustvar
		local num = e(N_clust`i')
		.`left'.Arrpush `C1' "Number of clusters (" as res "`cluster'" as text ") " `C2' as text "= " as res %`c2wfmt'.0fc `num'
	}

	HeaderDisplay `left' `right' `"`title'"' `"`title2'"' `"`title3'"' `"`title4'"' `"`title5'"'
end
99
+
100
* Renders the header: prints any title lines that did not fit into the
* left column, then displays the left/right arrays row by row.
program HeaderDisplay
	args left right title1 title2 title3 title4 title5

	local nl = `.`left'.arrnels'
	local nr = `.`right'.arrnels'
	local K = max(`nl',`nr')

	di
	if `"`title1'"' != "" {
		* BUGFIX: was `title', which is undefined in this program and
		* printed a blank line instead of the main title
		di as txt `"`title1'"'
		forval i = 2/5 {
			if `"`title`i''"' != "" {
				di as txt `"`title`i''"'
			}
		}
		if `K' {
			di
		}
	}

	local c _c
	forval i = 1/`K' {
		di as txt `.`left'[`i']' as txt `.`right'[`i']'
	}
end
125
+
126
* Pushes the F-test rows (statistic and p-value) onto the right-hand
* header array; falls back to a "robust singular" help link when e(F)
* is missing. NOTE(review): the `is_svy' argument is accepted but never
* used in this body.
program Ftest
	args right C3 C4 c4wfmt is_svy

	local df = e(df_r)
	if !missing(e(F)) {
		.`right'.Arrpush ///
			`C3' "F(" ///
			as res %4.0f e(df_m) ///
			as txt "," ///
			as res %7.0f `df' as txt ")" `C4' "= " ///
			as res %`c4wfmt'.2f e(F)
		.`right'.Arrpush ///
			`C3' "Prob > F" `C4' "= " ///
			as res %`c4wfmt'.4f Ftail(e(df_m),`df',e(F))
	}
	else {
		* F is missing (e.g. dropped collinear regressors): show a help
		* link explaining the singular-VCE case and dots for the values
		local dfm_l : di %4.0f e(df_m)
		local dfm_l2: di %7.0f `df'
		local j_robust "{help j_robustsingular##|_new:F(`dfm_l',`dfm_l2')}"
		.`right'.Arrpush ///
			`C3' "`j_robust'" ///
			as txt `C4' "= " as result %`c4wfmt's "."
		.`right'.Arrpush ///
			`C3' "Prob > F" `C4' "= " as res %`c4wfmt's "."
	}
end
152
+
153
* Pushes the chi-squared test rows (statistic and p-value) onto the
* right-hand header array; mirrors Ftest but for chi2-based models.
program Chi2test

	args right C3 C4 c4wfmt

	local type `e(chi2type)'
	if `"`type'"' == "" {
		local type Wald
	}
	if !missing(e(chi2)) {
		.`right'.Arrpush ///
			`C3' "`type' chi2(" ///
			as res e(df_m) ///
			as txt ")" `C4' "= " ///
			as res %`c4wfmt'.2f e(chi2)
		.`right'.Arrpush ///
			`C3' "Prob > chi2" `C4' "= " ///
			as res %`c4wfmt'.4f chi2tail(e(df_m),e(chi2))
	}
	else {
		* chi2 missing: show the robust-singular help link and dots
		local j_robust ///
			"{help j_robustsingular##|_new:`type' chi2(`e(df_m)')}"
		.`right'.Arrpush ///
			`C3' "`j_robust'" ///
			as txt `C4' "= " as res %`c4wfmt's "."
		.`right'.Arrpush ///
			`C3' "Prob > chi2" `C4' "= " ///
			as res %`c4wfmt's "."
	}
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_lsmr.mata ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mata:
2
+
3
+ // --------------------------------------------------------------------------
4
+ // LSMR estimation: Solve Ax=b with LS (ignore consistent case) (A?=y) (Z=D?)
5
+ // --------------------------------------------------------------------------
6
+ // Source: http://web.stanford.edu/group/SOL/software/lsmr/
7
+ // Code based on https://github.com/timtylin/lsmr-SLIM/blob/master/lsmr.m
8
+ // Copyright (BSD2): https://github.com/timtylin/lsmr-SLIM/blob/master/license.txt
9
+
10
+ // Requirements
11
+ // A(x, 1) = Ax Projections "xβ"
12
+ // A(x, 2) = A'x Sum of y by group; panelmean() if dummies and w/precond
13
+
14
// LSMR iterative least-squares solver (Fong & Saunders): solves min ||Ax-b||
// where A is applied implicitly through S.lsmr_A_mult() / S.lsmr_At_mult().
// Returns the residual u = b - A x; the solution is written into -x-
// (output argument). Convergence state is stored in S.converged and
// S.iteration_count; exits with error 430 if S.maxiter is exhausted.
`Vector' lsmr(`FixedEffects' S, `Vector' b, `Vector' x) {
	`Real' eps
	`Integer' iter // m, n
	`Real' beta, zetabar, alphabar, rho, rhobar, cbar, sbar
	`Real' betadd, betad, rhodold, tautildeold, thetatilde, zeta, d
	`Real' normA2, maxrbar, minrbar
	`Real' normb, normr
	`Real' test1, test2, test3
	`Vector' u, v, h, hbar

	`Real' alpha, alphahat, lambda, chat, shat, rhoold, c, s, thetanew, rhobarold, zetaold, stildeold
	`Real' thetabar, rhotemp, betaacute, betacheck, betahat, thetatildeold, rhotildeold, ctildeold, taud
	`Real' normA, normAr, condA, normx, rtol

	assert(cols(b)==1)
	if (S.verbose > 0) printf("\n{txt}## Computing LSMR\n\n")

	// Constants
	eps = epsilon(1)

	lambda = 0 // not used (regularization term; kept at zero)
	S.converged = 0

	// Golub-Kahan bidiagonalization: initial vectors u, v
	beta = S.lsmr_norm(b)
	assert_msg(beta < . , "beta is missing")
	u = (beta > eps) ? (b / beta) : b
	v = S.lsmr_At_mult(u) // v = (*A)(u, 2)
	assert_msg(!missing(v), "-v- missing")
	// m = rows(v) // A is m*n
	// n = rows(u)

	alpha = S.lsmr_norm(v)
	assert_msg(alpha < . , "alpha is missing")
	if (alpha > eps) v = v / alpha

	// Initialize variables for 1st iteration.
	zetabar = alpha * beta
	alphabar = alpha
	rho = rhobar = cbar = 1
	sbar = 0

	h = v
	hbar = J(rows(h), 1, 0) // remove this
	//x = J(rows(h), 1, 0)

	// Initialize variables for estimation of ||r||
	betadd = beta
	betad = 0
	rhodold = 1
	tautildeold = 0
	thetatilde = 0
	zeta = 0
	d = 0

	// Initialize variables for estimation of ||A|| and cond(A)
	normA2 = alpha ^ 2
	maxrbar = 0
	minrbar = 1e+100

	// Items for use in stopping rules.
	normb = beta
	normr = beta

	// Exit if b=0 or A'b = 0.
	normAr = alpha * beta
	if (normAr == 0) {
		"DONE -> UPDATE THIS STOPPING CONDITION"
		// NOTE(review): bare -return- in a `Vector'-returning function;
		// callers receive nothing in this (b==0 or A'b==0) corner case
		return
	}

	if (S.verbose > 1) {
		"< < < <"
		test1 = 1
		test2 = alpha / beta
		printf(" %10.3e %10.3e\n", normr, normAr )
		printf(" %8.1e %8.1e\n" , test1, test2 )
		"> > > > "
	}

	// Main loop

	for (iter=1; iter<=S.maxiter; iter++) {

		// Update (1) βu = Av - αu (2) αv = A'u - βv
		u = S.lsmr_A_mult(v) - alpha * u // u = (*A)(v, 1) - alpha * u

		//"hash of u"
		//hash1(round(u*1e5))
		//u[1..5]

		beta = S.lsmr_norm(u)
		if (beta > eps) u = u / beta

		v = S.lsmr_At_mult(u) - beta * v // v = (*A)(u, 2) - beta * v
		alpha = S.lsmr_norm(v)
		if (alpha > eps) v = v / alpha

		// α and β are now on iteration {k+1}

		// Construct rotation Qhat_{k, 2k+1}
		alphahat = S.lsmr_norm((alphabar, lambda))
		assert_msg(alphahat < . , "alphahat is missing")
		chat = alphabar / alphahat
		shat = lambda / alphahat

		// Use a plane rotation (Q_i) to turn B_i to R_i.
		rhoold = rho
		rho = norm((alphahat, beta))
		c = alphahat / rho
		s = beta / rho
		thetanew = s * alpha
		alphabar = c * alpha

		// Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar.
		rhobarold = rhobar
		zetaold = zeta
		thetabar = sbar * rho
		rhotemp = cbar * rho
		rhobar = norm((cbar * rho, thetanew))
		cbar = cbar * rho / rhobar
		sbar = thetanew / rhobar
		zeta = cbar * zetabar
		zetabar = -sbar * zetabar

		// Update h, h_hat, x
		hbar = iter > 1 ? h - (thetabar * rho / (rhoold * rhobarold)) * hbar : h
		assert_msg(!missing(hbar), "hbar missing")
		x = iter > 1 ? x + (zeta / (rho * rhobar)) * hbar : (zeta / (rho * rhobar)) * hbar
		assert_msg(!missing(x), "x missing")
		h = v - (thetanew / rho) * h

		// Estimate of ||r||

		// Apply rotation Qhat_{k,2k+1}
		betaacute = chat * betadd
		betacheck = -shat * betadd

		// Apply rotation Q_{k,k+1}
		betahat = c * betaacute;
		betadd = -s * betaacute;

		// Apply rotation Qtilde_{k-1}
		// betad = betad_{k-1} here
		thetatildeold = thetatilde
		rhotildeold = norm((rhodold, thetabar))
		ctildeold = rhodold / rhotildeold
		stildeold = thetabar / rhotildeold
		thetatilde = stildeold * rhobar
		rhodold = ctildeold * rhobar
		betad = -stildeold * betad + ctildeold * betahat

		// betad = betad_k here
		// rhodold = rhod_k here
		tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
		taud = (zeta - thetatilde * tautildeold) / rhodold
		d = d + betacheck^2
		normr = sqrt(d + (betad - taud)^2 + betadd^2)

		// Estimate ||A||.
		normA2 = normA2 + beta^2
		normA = sqrt(normA2)
		normA2 = normA2 + alpha^2

		// Estimate cond(A)
		maxrbar = max((maxrbar,rhobarold))
		if (iter > 1) minrbar = min((minrbar,rhobarold))
		condA = max((maxrbar,rhotemp)) / min((minrbar,rhotemp))

		// Test for convergence.

		// Compute norms for convergence testing.
		normAr = abs(zetabar)
		normx = S.lsmr_norm(x)

		// Now use these norms to estimate certain other quantities,
		// some of which will be small near a solution.
		test1 = normr / normb
		test2 = normAr / (normA*normr)
		test3 = 1 / condA
		rtol = S.btol + S.tolerance *normA*normx / normb

		// The following tests guard against extremely small values of
		// atol, btol or ctol. (The user may have set any or all of
		// the parameters atol, btol, conlim to 0.)
		// The effect is equivalent to the normAl tests using
		// atol = eps, btol = eps, conlim = 1/eps.

		// Allow for tolerances set by the user.

		if (test3 <= 1 / S.conlim) S.converged = 3
		if (test2 <= S.tolerance) S.converged = 2
		if (test1 <= rtol) S.converged = 1

		if (S.verbose > 1) {
			printf(" - Convergence: %g\n", S.converged)
			"iter normr normAr"
			iter, normr, normAr
			"test1 test2 test3"
			test1, test2, test3
			"criteria1 criteria2 criteria3"
			1/S.conlim , S.tolerance, rtol
			">>>"
		}

		// Optional benchmarking of the relative residual error
		if (S.compute_rre & !S.prune) {
			reghdfe_rre_benchmark(b - S.lsmr_A_mult(x), S.rre_true_residual, S.rre_depvar_norm)
		}

		if (S.converged) break
	}

	if (!S.converged) {
		printf("\n{err}convergence not achieved in %g iterations (last error=%e); try increasing maxiter() or decreasing tol().\n", S.maxiter, test2)
		exit(430)
	}

	S.iteration_count = max((iter, S.iteration_count))

	// Return the final residual b - Ax (the demeaned data)
	u = b - S.lsmr_A_mult(x)
	return(u)
}
235
+ end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_mata.sthlp ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *! version 4.4.0 11sep2017}{...}
3
+ {vieweralsosee "reghdfe" "help reghdfe"}{...}
4
+ {vieweralsosee "ftools" "help ftools"}{...}
5
+ {viewerjumpto "Syntax" "ftools##syntax"}{...}
6
+ {viewerjumpto "Creation" "ftools##creation"}{...}
7
+ {viewerjumpto "Properties and methods" "ftools##properties"}{...}
8
+ {viewerjumpto "Description" "ftools##description"}{...}
9
+ {viewerjumpto "Usage" "ftools##usage"}{...}
10
+ {viewerjumpto "Example" "ftools##example"}{...}
11
+ {viewerjumpto "Remarks" "ftools##remarks"}{...}
12
+ {viewerjumpto "Using functions from collapse" "ftools##collapse"}{...}
13
+ {viewerjumpto "Experimental/advanced" "ftools##experimental"}{...}
14
+ {viewerjumpto "Source code" "ftools##source"}{...}
15
+ {viewerjumpto "Author" "ftools##contact"}{...}
16
+
17
+ {title:Title}
18
+
19
+ {p2colset 5 22 22 2}{...}
20
+ {p2col :{cmd:FixedEffects} {hline 2}}Mata class behind {cmd:reghdfe}{p_end}
21
+ {p2colreset}{...}
22
+
23
+ {marker syntax}{...}
24
+ {title:Syntax}
25
+
26
+ {pstd}
27
+ {it}To construct the object:
28
+
29
+
30
+ {p 8 16 2}
31
+ {it:class FixedEffects}
32
+ {cmd:fixed_effects(}{space 1}{it:absvars} [
33
+ {cmd:,}
34
+ {it:touse}{cmd:,}
35
+ {it:weighttype}{cmd:,}
36
+ {it:weightvar}{cmd:,}
37
+ {it:drop_singletons}{cmd:,}
38
+ {it:verbose}]{cmd:)}
39
+
40
+ {marker arguments}{...}
41
+ {synoptset 38 tabbed}{...}
42
+ {synopthdr}
43
+ {synoptline}
44
+ {p2coldent:* {it:string} absvars}names of variables that identify each set of fixed effects{p_end}
45
+ {synopt:{it:string} touse}name of dummy {help mark:touse} variable{p_end}
46
+ {synopt:{it:string} weighttype}type of weight (fweight, pweight, aweight, iweight){p_end}
47
+ {synopt:{it:string} weightvar}name of weight variable{p_end}
48
+ {synopt:{it:string} drop_singletons}whether to drop singleton groups or not{p_end}
49
+ {synopt:{it:string} verbose}how much information to report
50
+ (0: report warnings, 1 to 4 reports more details, -1 is silent){p_end}
51
+ {p2colreset}{...}
52
+
53
+
54
+ {marker usage}{...}
55
+ {title:Standard usage}
56
+
57
+ {pstd}(optional) First, you can declare the FixedEffects object:
58
+
59
+ {p 8 8 2}
60
+ {cmd:class FixedEffects}{it: HDFE}{break}
61
+
62
+ {pstd}Then, you create the object from categorical variables, categorical-continuous interactions, etc.:
63
+
64
+ {p 8 8 2}
65
+ {it:HDFE }{cmd:=}{bind: }{cmd:fixed_effects(}{it:varnames}{cmd:)}
66
+
67
+ {pstd}
68
+ Then you can modify the object and add important properties:
69
+
70
+ {p 8 8 2}{it:HDFE.varlist }{cmd:=}{bind: }{it:varlist} // used to report messages about all demeaned variables{p_end}
71
+ {p 8 8 2}{it:HDFE.indepvars }{cmd:=}{bind: }{it:indepvars} // used to report messages about demeaned regressors{p_end}
72
+ {p 8 8 2}{it:HDFE.num_clusters }{cmd:=}{bind: }{it:#} // Number of clusters{p_end}
73
+
74
+ {p 8 8 2}
75
+ {it: ... see reghdfe.ado for more options and how to combine them}
76
+
77
+
78
+ {marker properties}{...}
79
+ {title:Properties and Methods}
80
+
81
+ {marker arguments}{...}
82
+ {synoptset 38 tabbed}{...}
83
+
84
+ {synopthdr:properties (factors)}
85
+ {synoptline}
86
+
87
+ {synopt:{it:Integer} {cmd:N}}number of obs{p_end}
88
+ {synopt:{it:Integer} {cmd:M}}Sum of all possible FE coefs{p_end}
89
+ {synopt:{it:Factors} {cmd:factors}}{p_end}
90
+ {synopt:{it:Vector} {cmd:sample}}{p_end}
91
+ {synopt:{it:Varlist} {cmd:absvars}}{p_end}
92
+ {synopt:{it:Varlist} {cmd:ivars}}{p_end}
93
+ {synopt:{it:Varlist} {cmd:cvars}}{p_end}
94
+ {synopt:{it:Boolean} {cmd:has_intercept}}{p_end}
95
+ {synopt:{it:RowVector} {cmd:intercepts}}{p_end}
96
+ {synopt:{it:RowVector} {cmd:num_slopes}}{p_end}
97
+ {synopt:{it:Integer} {cmd:num_singletons}}{p_end}
98
+ {synopt:{it:Boolean} {cmd:save_any_fe}}{p_end}
99
+ {synopt:{it:Boolean} {cmd:save_all_fe}}{p_end}
100
+ {synopt:{it:Varlist} {cmd:targets}}{p_end}
101
+ {synopt:{it:RowVector} {cmd:save_fe}}{p_end}
102
+
103
+ {synopthdr:properties (optimization options)}
104
+ {synoptline}
105
+
106
+ {synopt:{it:Real} {cmd:tolerance}}{p_end}
107
+ {synopt:{it:Integer} {cmd:maxiter}}{p_end}
108
+ {synopt:{it:String} {cmd:transform}}Kaczmarz Cimmino Symmetric_kaczmarz (k c s){p_end}
109
+ {synopt:{it:String} {cmd:acceleration}}Acceleration method. None/No/Empty is none{p_end}
110
+ {synopt:{it:Integer} {cmd:accel_start}}Iteration where we start to accelerate /set it at 6? 2?3?{p_end}
111
+ {synopt:{it:string} {cmd:slope_method}}{p_end}
112
+ {synopt:{it:Boolean} {cmd:prune}}Whether to recursively prune degree-1 edges{p_end}
113
+ {synopt:{it:Boolean} {cmd:abort}}Raise error if convergence failed?{p_end}
114
+ {synopt:{it:Integer} {cmd:accel_freq}}Specific to Aitken's acceleration{p_end}
115
+ {synopt:{it:Boolean} {cmd:storing_alphas}}1 if we should compute the alphas/fes{p_end}
116
+ {synopt:{it:Real} {cmd:conlim}}specific to LSMR{p_end}
117
+ {synopt:{it:Real} {cmd:btol}}specific to LSMR{p_end}
118
+
119
+ {synopthdr:properties (optimization objects)}
120
+ {synoptline}
121
+
122
+
123
+ {synopt:{it:BipartiteGraph} {cmd:bg}}Used when pruning 1-core vertices{p_end}
124
+ {synopt:{it:Vector} {cmd:pruned_weight}}temp. weight for the factors that were pruned{p_end}
125
+ {synopt:{it:Integer} {cmd:prune_g1}}Factor 1/2 in the bipartite subgraph that gets pruned{p_end}
126
+ {synopt:{it:Integer} {cmd:prune_g2}}Factor 2/2 in the bipartite subgraph that gets pruned{p_end}
127
+ {synopt:{it:Integer} {cmd:num_pruned}}Number of vertices (levels) that were pruned{p_end}
128
+
129
+ {synopthdr:properties (misc)}
130
+ {synoptline}
131
+
132
+ {synopt:{it:Integer} {cmd:verbose}}{p_end}
133
+ {synopt:{it:Boolean} {cmd:timeit}}{p_end}
134
+ {synopt:{it:Boolean} {cmd:store_sample}}{p_end}
135
+ {synopt:{it:Real} {cmd:finite_condition}}{p_end}
136
+ {synopt:{it:Real} {cmd:compute_rre}}Relative residual error: || e_k - e || / || e ||{p_end}
137
+ {synopt:{it:Real} {cmd:rre_depvar_norm}}{p_end}
138
+ {synopt:{it:Vector} {cmd:rre_varname}}{p_end}
139
+ {synopt:{it:Vector} {cmd:rre_true_residual}}{p_end}
140
+
141
+ {synopthdr:properties (weight-specific)}
142
+ {synoptline}
143
+
144
+ {synopt:{it:Boolean} {cmd:has_weights}}{p_end}
145
+ {synopt:{it:Variable} {cmd:weight}}unsorted weight{p_end}
146
+ {synopt:{it:String} {cmd:weight_var}}Weighting variable{p_end}
147
+ {synopt:{it:String} {cmd:weight_type}}Weight type (pw, fw, etc){p_end}
148
+
149
+ {synopthdr:properties (absorbed degrees-of-freedom computations)}
150
+ {synoptline}
151
+
152
+ {synopt:{it:Integer} {cmd:G_extended}}Number of intercepts plus slopes{p_end}
153
+ {synopt:{it:Integer} {cmd:df_a_redundant}}e(mobility){p_end}
154
+ {synopt:{it:Integer} {cmd:df_a_initial}}{p_end}
155
+ {synopt:{it:Integer} {cmd:df_a}}df_a_initial - df_a_redundant{p_end}
156
+ {synopt:{it:Vector} {cmd:doflist_M}}{p_end}
157
+ {synopt:{it:Vector} {cmd:doflist_K}}{p_end}
158
+ {synopt:{it:Vector} {cmd:doflist_M_is_exact}}{p_end}
159
+ {synopt:{it:Vector} {cmd:doflist_M_is_nested}}{p_end}
160
+ {synopt:{it:Vector} {cmd:is_slope}}{p_end}
161
+ {synopt:{it:Integer} {cmd:df_a_nested}}Redundant due to being nested; used for: r2_a r2_a_within rmse{p_end}
162
+
163
+ {synopthdr:properties (VCE and cluster variables)}
164
+ {synoptline}
165
+
166
+ {synopt:{it:String} {cmd:vcetype}}{p_end}
167
+ {synopt:{it:Integer} {cmd:num_clusters}}{p_end}
168
+ {synopt:{it:Varlist} {cmd:clustervars}}{p_end}
169
+ {synopt:{it:Varlist} {cmd:base_clustervars}}{p_end}
170
+ {synopt:{it:String} {cmd:vceextra}}{p_end}
171
+
172
+ {synopthdr:properties (regression-specific)}
173
+ {synoptline}
174
+
175
+ {synopt:{it:String} {cmd:varlist}}y x1 x2 x3 x4 z1 z2 z3{p_end}
176
+ {synopt:{it:String} {cmd:depvar}}y{p_end}
177
+ {synopt:{it:String} {cmd:indepvars}}x1 x2{p_end}
178
+
179
+ {synopt:{it:Boolean} {cmd:drop_singletons}}{p_end}
180
+ {synopt:{it:String} {cmd:absorb}}contents of absorb(){p_end}
181
+ {synopt:{it:String} {cmd:select_if}}If condition{p_end}
182
+ {synopt:{it:String} {cmd:select_in}}In condition{p_end}
183
+ {synopt:{it:String} {cmd:model}}ols, iv{p_end}
184
+ {synopt:{it:String} {cmd:summarize_stats}}{p_end}
185
+ {synopt:{it:Boolean} {cmd:summarize_quietly}}{p_end}
186
+ {synopt:{it:StringRowVector} {cmd:dofadjustments}}firstpair pairwise cluster continuous{p_end}
187
+ {synopt:{it:Varname} {cmd:groupvar}}{p_end}
188
+ {synopt:{it:String} {cmd:residuals}}{p_end}
189
+ {synopt:{it:RowVector} {cmd:kept}}1 if the regressors are not deemed as omitted (by partial_out+cholsolve+invsym){p_end}
190
+ {synopt:{it:String} {cmd:diopts}}{p_end}
191
+
192
+ {synopthdr:properties (output)}
193
+ {synoptline}
194
+
195
+ {synopt:{it:String} {cmd:cmdline}}{p_end}
196
+ {synopt:{it:String} {cmd:subcmd}}{p_end}
197
+ {synopt:{it:String} {cmd:title}}{p_end}
198
+ {synopt:{it:Boolean} {cmd:converged}}{p_end}
199
+ {synopt:{it:Integer} {cmd:iteration_count}}e(ic){p_end}
200
+ {synopt:{it:Varlist} {cmd:extended_absvars}}{p_end}
201
+ {synopt:{it:String} {cmd:notes}}{p_end}
202
+ {synopt:{it:Integer} {cmd:df_r}}{p_end}
203
+ {synopt:{it:Integer} {cmd:df_m}}{p_end}
204
+ {synopt:{it:Integer} {cmd:N_clust}}{p_end}
205
+ {synopt:{it:Integer} {cmd:N_clust_list}}{p_end}
206
+ {synopt:{it:Real} {cmd:rss}}{p_end}
207
+ {synopt:{it:Real} {cmd:rmse}}{p_end}
208
+ {synopt:{it:Real} {cmd:F}}{p_end}
209
+ {synopt:{it:Real} {cmd:tss}}{p_end}
210
+ {synopt:{it:Real} {cmd:tss_within}}{p_end}
211
+ {synopt:{it:Real} {cmd:sumweights}}{p_end}
212
+ {synopt:{it:Real} {cmd:r2}}{p_end}
213
+ {synopt:{it:Real} {cmd:r2_within}}{p_end}
214
+ {synopt:{it:Real} {cmd:r2_a}}{p_end}
215
+ {synopt:{it:Real} {cmd:r2_a_within}}{p_end}
216
+ {synopt:{it:Real} {cmd:ll}}{p_end}
217
+ {synopt:{it:Real} {cmd:ll_0}}{p_end}
218
+
219
+ {synopthdr:methods}
220
+ {synoptline}
221
+
222
+ {synopt:{it:Void} {cmd:update_sorted_weights}()}{p_end}
223
+ {synopt:{it:Matrix} {cmd:partial_out}()}{p_end}
224
+ {synopt:{it:Void} {cmd:_partial_out}()}in-place alternative to {cmd:partial_out()}{p_end}
225
+ {synopt:{it:Variables} {cmd:project_one_fe}()}{p_end}
226
+ {synopt:{it:Void} {cmd:prune_1core}()}{p_end}
227
+ {synopt:{it:Void} {cmd:_expand_1core}()}{p_end}
228
+ {synopt:{it:Void} {cmd:estimate_dof}()}{p_end}
229
+ {synopt:{it:Void} {cmd:estimate_cond}()}{p_end}
230
+ {synopt:{it:Void} {cmd:save_touse}()}{p_end}
231
+ {synopt:{it:Void} {cmd:store_alphas}()}{p_end}
232
+ {synopt:{it:Void} {cmd:save_variable}()}{p_end}
233
+ {synopt:{it:Void} {cmd:post_footnote}()}{p_end}
234
+ {synopt:{it:Void} {cmd:post}()}{p_end}
235
+ {synopt:{it:Void} {cmd:reload}(copy=0)}run this if e.g. touse changes{p_end}
236
+
237
+ {synopthdr:methods (LSMR-specific)}
238
+ {synoptline}
239
+
240
+ {synopt:{it:Real} {cmd:lsmr_norm}()}{p_end}
241
+ {synopt:{it:Vector} {cmd:lsmr_A_mult}()}{p_end}
242
+ {synopt:{it:Vector} {cmd:lsmr_At_mult}()}{p_end}
243
+
244
+
245
+ {marker functions}{...}
246
+ {title:Additional functions}
247
+
248
+ {pstd}
249
+ Several useful Mata functions are included. For instance,
250
+
251
+ {p 8 16 2}
252
+ {it:void}
253
+ {cmd:reghdfe_solve_ols(}{it:HDFE}
254
+ {cmd:,}
255
+ {it:X}{cmd:,}
256
+ {it:...}
257
+ {cmd:)}
258
+
259
+ {pstd}
260
+ See {stata "mata: mata desc using lreghdfe"} for full list of functions and classes.
261
+
262
+
263
+ {marker description}{...}
264
+ {title:Description}
265
+
266
+ {pstd}
267
+ TBD
268
+
269
+
270
+ {marker example}{...}
271
+ {title:Example: OLS regression}
272
+
273
+ {pstd}
274
+ TBD
275
+
276
+
277
+ {inp}
278
+ {hline 60}
279
+ sysuse auto, clear
280
+ local depvar price
281
+ local indepvars weight gear
282
+ mata: HDFE = fixed_effects("turn", "", "fweight", "trunk", 0, 2)
283
+ mata: HDFE.varlist = "`depvar' `indepvars'"
284
+ mata: HDFE.indepvars = "`indepvars'"
285
+ mata: data = HDFE.partial_out("`depvar' `indepvars'")
286
+ mata: reghdfe_solve_ols(HDFE, data, b=., V=., N=., rank=., df_r=., resid=., kept=., "vce_none")
287
+ mata: b
288
+ {hline 60}
289
+ {text}
290
+
291
+
292
+ {marker remarks}{...}
293
+ {title:Remarks}
294
+
295
+ {pstd}
296
+ TBD
297
+
298
+ {marker experimental}{...}
299
+ {title:Experimental/advanced functions}
300
+
301
+ {pstd}
302
+ TBD (LSMR, Prune, Bipartite?)
303
+
304
+ {marker source}{...}
305
+ {title:Source code}
306
+
307
+ {pstd}
308
+ {view reghdfe.mata, adopath asis:reghdfe.mata};
309
+ {view reghdfe_bipartite.mata, adopath asis:reghdfe_bipartite.mata};
310
+ {view reghdfe_class.mata, adopath asis:reghdfe_class.mata};
311
+ {view reghdfe_constructor.mata, adopath asis:reghdfe_constructor.mata};
312
+ {view reghdfe_common.mata, adopath asis:reghdfe_common.mata};
313
+ {view reghdfe_projections.mata, adopath asis:reghdfe_projections.mata};
314
+ {view reghdfe_transforms.mata, adopath asis:reghdfe_transforms.mata};
315
+ {view reghdfe_accelerations.mata, adopath asis:reghdfe_accelerations.mata};
316
+ {view reghdfe_lsmr.mata, adopath asis:reghdfe_lsmr.mata}
317
+ {p_end}
318
+
319
+ {pstd}
320
+ Also, the latest version is available online: {browse "https://github.com/sergiocorreia/reghdfe/tree/master/src"}
321
+
322
+
323
+ {marker author}{...}
324
+ {title:Author}
325
+
326
+ {pstd}Sergio Correia{break}
327
+ {break}
328
+ {browse "http://scorreia.com"}{break}
329
+ {browse "mailto:[email protected]":[email protected]}{break}
330
+ {p_end}
331
+
332
+
333
+ {marker project}{...}
334
+ {title:More Information}
335
+
336
+ {pstd}{break}
337
+ To report bugs, contribute, ask for help, etc. please see the project URL in Github:{break}
338
+ {browse "https://github.com/sergiocorreia/reghdfe"}{break}
339
+ {p_end}
340
+
341
+
342
+ {marker acknowledgment}{...}
343
+ {title:Acknowledgment}
344
+
345
+ {pstd}
346
+ TBD
30/replication_package/Adofiles/reghdfe_2019/reghdfe_old.ado ADDED
The diff for this file is too large to render. See raw diff
 
30/replication_package/Adofiles/reghdfe_2019/reghdfe_old.sthlp ADDED
@@ -0,0 +1,872 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {smcl}
2
+ {* *! version 3.2.9 21feb2016}{...}
3
+ {vieweralsosee "[R] areg" "help areg"}{...}
4
+ {vieweralsosee "[R] xtreg" "help xtreg"}{...}
5
+ {vieweralsosee "[R] ivregress" "help ivregress"}{...}
6
+ {vieweralsosee "" "--"}{...}
7
+ {vieweralsosee "ivreg2" "help ivreg2"}{...}
8
+ {vieweralsosee "ivregress" "help ivregress"}{...}
9
+ {vieweralsosee "reg2hdfe" "help reg2hdfe"}{...}
10
+ {vieweralsosee "a2reg" "help a2reg"}{...}
11
+ {viewerjumpto "Syntax" "reghdfe##syntax"}{...}
12
+ {viewerjumpto "Description" "reghdfe##description"}{...}
13
+ {viewerjumpto "Options" "reghdfe##options"}{...}
14
+ {viewerjumpto "Postestimation Syntax" "reghdfe##postestimation"}{...}
15
+ {viewerjumpto "Remarks" "reghdfe##remarks"}{...}
16
+ {viewerjumpto "Examples" "reghdfe##examples"}{...}
17
+ {viewerjumpto "Stored results" "reghdfe##results"}{...}
18
+ {viewerjumpto "Author" "reghdfe##contact"}{...}
19
+ {viewerjumpto "Updates" "reghdfe##updates"}{...}
20
+ {viewerjumpto "Acknowledgements" "reghdfe##acknowledgements"}{...}
21
+ {viewerjumpto "References" "reghdfe##references"}{...}
22
+ {title:Title}
23
+
24
+ {p2colset 5 18 20 2}{...}
25
+ {p2col :{cmd:reghdfe} {hline 2}}Linear and instrumental-variable/GMM regression absorbing multiple levels of fixed effects{p_end}
26
+ {p2colreset}{...}
27
+
28
+ {marker syntax}{...}
29
+ {title:Syntax}
30
+
31
+ {p 8 15 2} {cmd:reghdfe}
32
+ {depvar} [{indepvars}] [{cmd:(}{it:{help varlist:endogvars}} {cmd:=} {it:{help varlist:iv_vars}}{cmd:)}]
33
+ {ifin} {it:{weight}} {cmd:,} {opth a:bsorb(reghdfe##absvar:absvars)} [{help reghdfe##options:options}] {p_end}
34
+
35
+ {marker opt_summary}{...}
36
+ {synoptset 22 tabbed}{...}
37
+ {synopthdr}
38
+ {synoptline}
39
+ {syntab:Model {help reghdfe##opt_model:[+]}}
40
+ {p2coldent:* {opth a:bsorb(reghdfe##absvar:absvars)}}identifiers of the absorbed fixed effects; each {help reghdfe##absvar:absvar} represents one set of fixed effects{p_end}
41
+ {synopt: {cmdab:a:bsorb(}{it:...}{cmd:,} {cmdab:save:fe)}}save all fixed effect estimates ({it:__hdfe*} prefix); useful for a subsequent {help reghdfe##postestimation:predict}.
42
+ However, see also the {it:resid} option.{p_end}
43
+ {synopt : {opth res:iduals(newvar)}}save residuals; more direct and much faster than saving the fixed effects and then running predict{p_end}
44
+ {synopt :{opth su:mmarize(tabstat##statname:stats)}}equivalent to {help reghdfe##postestimation:estat summarize} after the regression,
45
+ but more flexible, compatible with the {opt fast:} option, and saves results on {it:e(summarize)}{p_end}
46
+ {synopt : {opt subopt:ions(...)}}additional options that will be passed to the regression command (either {help regress}, {help ivreg2}, or {help ivregress}){p_end}
47
+
48
+ {syntab:SE/Robust {help reghdfe##opt_vce:[+]}}
49
+ {p2coldent:+ {opt vce}{cmd:(}{help reghdfe##opt_vce:vcetype} [{cmd:,}{it:opt}]{cmd:)}}{it:vcetype}
50
+ may be {opt un:adjusted} (default), {opt r:obust} or {opt cl:uster} {help fvvarlist} (allowing two- and multi-way clustering){p_end}
51
+ {synopt :}suboptions {opt bw(#)}, {opt ker:nel(str)}, {opt dkraay(#)} and {opt kiefer} allow for AC/HAC estimates; see the {help avar} package{p_end}
52
+
53
+ {syntab:Instrumental-Variable/2SLS/GMM {help reghdfe##opt_iv:[+]}}
54
+ {synopt :{opt est:imator(str)}}either {opt 2sls} (default), {opt gmm:2s} (two-stage GMM),
55
+ {opt liml} (limited-information maximum likelihood) or {opt cue} (which gives approximate results, see discussion below){p_end}
56
+ {synopt :{opt stage:s(list)}}estimate additional regressions; choose any of {opt first} {opt ols} {opt reduced} {opt acid} (or {opt all}){p_end}
57
+ {synopt :{opt ff:irst}}compute first-stage diagnostic and identification statistics{p_end}
58
+ {synopt :{opth iv:suite(subcmd)}}package used in the IV/GMM regressions;
59
+ options are {opt ivreg2} (default; needs installing) and {opt ivregress}{p_end}
60
+
61
+ {syntab:Diagnostic {help reghdfe##opt_diagnostic:[+]}}
62
+ {synopt :{opt v:erbose(#)}}amount of debugging information to show (0=None, 1=Some, 2=More, 3=Parsing/convergence details, 4=Every iteration){p_end}
63
+ {synopt :{opt time:it}}show elapsed times by stage of computation{p_end}
64
+
65
+ {syntab:Optimization {help reghdfe##opt_optimization:[+]}}
66
+ {p2coldent:+ {opth tol:erance(#)}}criterion for convergence (default=1e-8){p_end}
67
+ {synopt :{opth maxit:erations(#)}}maximum number of iterations (default=10,000); if set to missing ({cmd:.}) it will run for as long as it takes.{p_end}
68
+ {synopt :{opth pool:size(#)}}apply the within algorithm in groups of {it:#} variables (default 10). a large poolsize is usually faster but uses more memory{p_end}
69
+ {synopt :{opt accel:eration(str)}}acceleration method; options are conjugate_gradient (cg), steep_descent (sd), aitken (a), and none (no){p_end}
70
+ {synopt :{opt transf:orm(str)}}transform operation that defines the type of alternating projection; options are Kaczmarz (kac), Cimmino (cim), Symmetric Kaczmarz (sym){p_end}
71
+
72
+ {syntab:Speedup Tricks {help reghdfe##opt_speedup:[+]}}
73
+ {synopt :{cmd: cache(save} [,opt]{cmd:)}}absorb all variables without regressing (destructive; combine it with {help preserve:preserve/restore}){p_end}
74
+ {synopt :}suboption {opth keep(varlist)} adds additional untransformed variables to the resulting dataset{p_end}
75
+ {synopt :{cmd: cache(use)}}run regressions on cached data; {it:vce()} must be the same as with {cmd: cache(save)}.{p_end}
76
+ {synopt :{cmd: cache(clear)}}delete Mata objects to clear up memory; no more regressions can be run after this{p_end}
77
+ {synopt :{opt fast}}will not create {it:e(sample)}; disabled when saving fixed effects, residuals or mobility groups{p_end}
78
+
79
+ {syntab:Degrees-of-Freedom Adjustments {help reghdfe##opt_dof:[+]}}
80
+ {synopt :{opt dof:adjustments(list)}}allows selecting the desired adjustments for degrees of freedom;
81
+ rarely used{p_end}
82
+ {synopt: {opth groupv:ar(newvar)}}unique identifier for the first mobility group{p_end}
83
+
84
+ {syntab:Reporting {help reghdfe##opt_reporting:[+]}}
85
+ {synopt :{opt version:}}reports the version number and date of reghdfe, and saves it in e(version). standalone option{p_end}
86
+ {synopt :{opt l:evel(#)}}set confidence level; default is {cmd:level(95)}{p_end}
87
+ {synopt :{it:{help reghdfe##display_options:display_options}}}control column formats, row spacing, line width, display of omitted variables and base and empty cells, and factor-variable labeling.{p_end}
88
+ {synopt :}particularly useful are the {opt noomit:ted} and {opt noempty} options to hide regressors omitted due to collinearity{p_end}
89
+
90
+ {syntab:Undocumented}
91
+ {synopt :{opt keepsin:gletons}}do not drop singleton groups{p_end}
92
+ {synopt :{opt old}}will call the latest 2.x version of reghdfe instead (see the {help reghdfe_old:old help file}){p_end}
93
+ {synoptline}
94
+ {p2colreset}{...}
95
+ {p 4 6 2}* {opt absorb(absvars)} is required.{p_end}
96
+ {p 4 6 2}+ indicates a recommended or important option.{p_end}
97
+ {p 4 6 2}{it:indepvars}, {it:endogvars} and {it:iv_vars} may contain factor variables; see {help fvvarlist}.{p_end}
98
+ {p 4 6 2}all the regression variables may contain time-series operators; see {help tsvarlist}.{p_end}
99
+ {p 4 6 2}{cmd:fweight}s, {cmd:aweight}s and {cmd:pweight}s are allowed; see {help weight}.{p_end}
100
+
101
+
102
+ {marker absvar}{...}
103
+ {title:Absvar Syntax}
104
+
105
+ {synoptset 22}{...}
106
+ {synopthdr:absvar}
107
+ {synoptline}
108
+ {synopt:{cmd:i.}{it:varname}}categorical variable to be absorbed (the {cmd:i.} prefix is tacit){p_end}
109
+ {synopt:{cmd:i.}{it:var1}{cmd:#i.}{it:var2}}absorb the interactions of multiple categorical variables{p_end}
110
+ {synopt:{cmd:i.}{it:var1}{cmd:#}{cmd:c.}{it:var2}}absorb heterogeneous slopes, where {it:var2} has a different slope coef. depending on the category of {it:var1}{p_end}
111
+ {synopt:{it:var1}{cmd:##}{cmd:c.}{it:var2}}equivalent to "{cmd:i.}{it:var1} {cmd:i.}{it:var1}{cmd:#}{cmd:c.}{it:var2}", but {it:much} faster{p_end}
112
+ {synopt:{it:var1}{cmd:##c.(}{it:var2 var3}{cmd:)}}multiple heterogeneous slopes are allowed together. Alternative syntax: {it:var1}{cmd:##(c.}{it:var2} {cmd:c.}{it:var3}{cmd:)}{p_end}
113
+ {synopt:{it:v1}{cmd:#}{it:v2}{cmd:#}{it:v3}{cmd:##c.(}{it:v4 v5}{cmd:)}}factor operators can be combined{p_end}
114
+ {synoptline}
115
+ {p2colreset}{...}
116
+ {p 4 6 2}To save the estimates specific absvars, write {newvar}{inp:={it:absvar}}.{p_end}
117
+ {p 4 6 2}Please be aware that in most cases these estimates are neither consistent nor econometrically identified.{p_end}
118
+ {p 4 6 2}Using categorical interactions (e.g. {it:x}{cmd:#}{it:z}) is faster than running {it:egen group(...)} beforehand.{p_end}
119
+ {p 4 6 2}Singleton obs. are dropped iteratively until no more singletons are found (see ancillary article for details).{p_end}
120
+ {p 4 6 2}Slope-only absvars ("state#c.time") have poor numerical stability and slow convergence.
121
+ If you need those, either i) increase tolerance or
122
+ ii) use slope-and-intercept absvars ("state##c.time"), even if the intercept is redundant.
123
+ For instance if absvar is "i.zipcode i.state##c.time" then i.state is redundant given i.zipcode, but
124
+ convergence will still be {it:much} faster.{p_end}
125
+
126
+ {marker description}{...}
127
+ {title:Description}
128
+
129
+ {pstd}
130
+ {cmd:reghdfe} is a generalization of {help areg} (and {help xtreg:xtreg,fe}, {help xtivreg:xtivreg,fe}) for multiple levels of fixed effects
131
+ (including heterogeneous slopes), alternative estimators (2sls, gmm2s, liml), and additional robust standard errors (multi-way clustering, HAC standard errors, etc).{p_end}
132
+
133
+ {pstd}Additional features include:{p_end}
134
+
135
+ {p2col 8 12 12 2: a)}A novel and robust algorithm to efficiently absorb the fixed effects (extending the work of Guimaraes and Portugal, 2010).{p_end}
136
+ {p2col 8 12 12 2: b)}Coded in Mata, which in most scenarios makes it even faster than {it:areg} and {it:xtreg} for a single fixed effect (see benchmarks on the Github page).{p_end}
137
+ {p2col 8 12 12 2: c)}Can save the point estimates of the fixed effects ({it:caveat emptor}: the fixed effects may not be identified, see the {help reghdfe##references:references}).{p_end}
138
+ {p2col 8 12 12 2: d)}Calculates the degrees-of-freedom lost due to the fixed effects
139
+ (note: beyond two levels of fixed effects, this is still an open problem, but we provide a conservative approximation).{p_end}
140
+ {p2col 8 12 12 2: e)}Iteratively removes singleton groups by default, to avoid biasing the standard errors (see ancillary document).{p_end}
141
+
142
+ {marker options}{...}
143
+ {title:Options}
144
+
145
+ {marker opt_model}{...}
146
+ {dlgtab:Model and Miscellanea}
147
+
148
+ {phang}
149
+ {opth a:bsorb(reghdfe##absvar:absvars)} list of categorical variables (or interactions) representing the fixed effects to be absorbed.
150
+ this is equivalent to including an indicator/dummy variable for each category of each {it:absvar}. {cmd:absorb()} is required.
151
+
152
+ {pmore}
153
+ To save a fixed effect, prefix the absvar with "{newvar}{cmd:=}".
154
+ For instance, the option {cmd:absorb(firm_id worker_id year_coefs=year_id)} will include firm,
155
+ worker and year fixed effects, but will only save the estimates for the year fixed effects (in the new variable {it:year_coefs}).
156
+
157
+ {pmore}
158
+ If you want to {help reghdfe##postestimation:predict} afterwards but don't care about setting the names of each fixed effect, use the {cmdab:save:fe} suboption.
159
+ This will delete all variables named {it:__hdfe*__} and create new ones as required.
160
+ Example: {it:reghdfe price weight, absorb(turn trunk, savefe)}
161
+
162
+ {phang}
163
+ {opth res:iduals(newvar)} will save the regression residuals in a new variable.
164
+
165
+ {pmore}
166
+ This is a superior alternative than running {cmd:predict, resid} afterwards as it's faster and doesn't require saving the fixed effects.
167
+
168
+ {phang}
169
+ {opth su:mmarize(tabstat##statname:stats)} will report and save a table of summary of statistics of the regression
170
+ variables (including the instruments, if applicable), using the same sample as the regression.
171
+
172
+ {pmore} {opt su:mmarize} (without parenthesis) saves the default set of statistics: {it:mean min max}.
173
+
174
+ {pmore} The complete list of accepted statistics is available in the {help tabstat##statname:tabstat help}. The most useful are {it:count range sd median p##}.
175
+
176
+ {pmore} The summary table is saved in {it:e(summarize)}
177
+
178
+ {pmore} To save the summary table silently (without showing it after the regression table), use the {opt qui:etly} suboption. You can use it by itself ({cmd:summarize(,quietly)}) or with custom statistics ({cmd:summarize(mean, quietly)}).
179
+
180
+ {phang}
181
+ {opt subopt:ions(...)}
182
+ options that will be passed directly to the regression command (either {help regress}, {help ivreg2}, or {help ivregress})
183
+
184
+ {marker opt_vce}{...}
185
+ {dlgtab:SE/Robust}
186
+
187
+ {phang}
188
+ {opth vce:(reghdfe##vcetype:vcetype, subopt)}
189
+ specifies the type of standard error reported.
190
+ Note that all the advanced estimators rely on asymptotic theory, and will likely have poor performance with small samples
191
+ (but again if you are using reghdfe, that is probably not your case)
192
+
193
+ {pmore}
194
+ {opt un:adjusted}/{opt ols:} estimates conventional standard errors, valid even in small samples
195
+ under the assumptions of homoscedasticity and no correlation between observations
196
+
197
+ {pmore}
198
+ {opt r:obust} estimates heteroscedasticity-consistent standard errors (Huber/White/sandwich estimators), but still assuming independence between observations
199
+
200
+ {pmore}Warning: in a FE panel regression, using {opt r:obust} will
201
+ lead to inconsistent standard errors if for every fixed effect, the {it:other} dimension is fixed.
+ For instance, in a standard panel with individual and time fixed effects, we require both the number of
202
+ For instance, in an standard panel with individual and time fixed effects, we require both the number of
203
+ individuals and time periods to grow asymptotically.
204
+ If that is not the case, an alternative may be to use clustered errors,
205
+ which as discussed below will still have their own asymptotic requirements.
206
+ For a discussion, see
207
+ {browse "http://www.princeton.edu/~mwatson/papers/ecta6489.pdf":Stock and Watson, "Heteroskedasticity-robust standard errors for fixed-effects panel-data regression," Econometrica 76 (2008): 155-174}
208
+
209
+ {pmore}
210
+ {opt cl:uster} {it:clustervars} estimates consistent standard errors even when the observations
211
+ are correlated within groups.
212
+
213
+ {pmore}
214
+ Multi-way-clustering is allowed. Thus, you can indicate as many {it:clustervar}s as desired
215
+ (e.g. allowing for intragroup correlation across individuals, time, country, etc).
216
+
217
+ {pmore}
218
+ Each {it:clustervar} permits interactions of the type {it:var1{cmd:#}var2}
219
+ (this is faster than using {cmd:egen group()} for a one-off regression).
220
+
221
+ {pmore} Warning: The number of clusters, for all of the cluster variables, must go off to infinity.
222
+ A frequent rule of thumb is that each cluster variable must have at least 50 different categories
223
+ (the number of categories for each clustervar appears on the header of the regression table).
224
+
225
+ {pstd}
226
+ The following suboptions require either the {help ivreg2} or the {help avar} package from SSC.
227
+ For a careful explanation, see the {help ivreg2##s_robust:ivreg2 help file}, from which the comments below borrow.
228
+
229
+ {pmore}
230
+ {opt u:nadjusted}{cmd:, }{opt bw(#)} (or just {cmd:, }{opt bw(#)}) estimates autocorrelation-consistent standard errors (Newey-West).
231
+
232
+ {pmore}
233
+ {opt r:obust}{cmd:, }{opt bw(#)} estimates autocorrelation-and-heteroscedasticity consistent standard errors (HAC).
234
+
235
+ {pmore}
236
+ {opt cl:uster} {it:clustervars}{cmd:, }{opt bw(#)} estimates standard errors consistent to common autocorrelated disturbances (Driscoll-Kraay). At most two cluster variables can be used in this case.
237
+
238
+ {pmore}
239
+ {cmd:, }{opt kiefer} estimates standard errors consistent under arbitrary intra-group autocorrelation (but not heteroskedasticity) (Kiefer).
240
+
241
+ {pmore}
242
+ {opt kernel(str)} is allowed in all the cases that allow {opt bw(#)}.
243
+ The default kernel is {it:bar} (Bartlett). Valid kernels are Bartlett (bar); Truncated (tru); Parzen (par);
244
+ Tukey-Hanning (thann); Tukey-Hamming (thamm); Daniell (dan); Tent (ten); and Quadratic-Spectral (qua or qs).
245
+
246
+ {pstd}
247
+ Advanced suboptions:
248
+
249
+ {pmore}
250
+ {cmd:, }{opt suite(default|mwc|avar)} overrides the package chosen by reghdfe to estimate the VCE.
251
+ {it:default} uses the default Stata computation (allows unadjusted, robust, and at most one cluster variable).
252
+ {it:mwc} allows multi-way-clustering (any number of cluster variables), but without the {it:bw} and {it:kernel} suboptions.
253
+ {it:avar} uses the avar package from SSC. Is the same package used by ivreg2, and allows the {it:bw}, {it:kernel}, {it:dkraay} and {it:kiefer} suboptions.
254
+ This is useful almost exclusively for debugging.
255
+
256
+ {pmore}
257
+ {cmd:, }{opt twice:robust} will compute robust standard errors not only on the first but on the second step of the gmm2s estimation. Requires {opt ivsuite(ivregress)}, but will not give the exact same results as ivregress.
258
+
259
+ {pmore}{it:Explanation:} When running instrumental-variable regressions with the {cmd:ivregress} package,
260
+ robust standard errors, and a gmm2s estimator, reghdfe will translate
261
+ {opt vce(robust)} into {opt wmatrix(robust)} {opt vce(unadjusted)}.
262
+ This maintains compatibility with {cmd:ivreg2} and other packages, but may be inadvisable as described in {help ivregress} (technical note). Specifying this option will instead use {opt wmatrix(robust)} {opt vce(robust)}.
263
+
264
+ {pmore}However, computing the second-step vce matrix requires computing updated estimates (including updated fixed effects).
265
+ Since reghdfe currently does not allow this, the resulting standard errors
266
+ {hi:will not be exactly the same as with ivregress}.
267
+ This issue is similar to applying the CUE estimator, described further below.
268
+
269
+ {pmore}Note: The above comments are also applicable to clustered standard errors.
270
+
271
+ {marker opt_iv}{...}
272
+ {dlgtab:IV/2SLS/GMM}
273
+
274
+ {phang}
275
+ {opt est:imator}{cmd:(}{opt 2sls}|{opt gmm:2s}|{opt liml}|{opt cue}{cmd:)}
276
+ estimator used in the instrumental-variable estimation
277
+
278
+ {pmore}
279
+ {opt 2sls} (two-stage least squares, default), {opt gmm:2s} (two-stage efficient GMM), {opt liml} (limited-information maximum likelihood), and
280
+ {opt cue} ("continuously-updated" GMM) are allowed.{p_end}
281
+
282
+ {pmore}
283
+ Warning: {opt cue} will not give the same results as ivreg2. See the discussion in
284
+ {browse "http://www.stata-journal.com/sjpdf.html?articlenum=st0030_3": Baum, Christopher F., Mark E. Schaffer, and Steven Stillman. "Enhanced routines for instrumental variables/GMM estimation and testing." Stata Journal 7.4 (2007): 465-506}
285
+ (page 484).
286
+ Note that even if this is not exactly {opt cue}, it may still be a desirable/useful alternative to standard cue, as explained in the article.
287
+
288
+ {phang}
289
+ {opt stage:s(list)}
290
+ adds and saves up to four auxiliary regressions useful when running instrumental-variable regressions:
291
+
292
+ {phang2}{cmd:first} all first-stage regressions{p_end}
293
+ {phang2}{cmd:ols} ols regression (between dependent variable and endogenous variables; useful as a benchmark){p_end}
294
+ {phang2}{cmd:reduced} reduced-form regression (ols regression with included and excluded instruments as regressors){p_end}
295
+ {phang2}{cmd:acid} an "acid" regression that includes both instruments and endogenous variables as regressors; in this setup, excluded instruments should not be significant.{p_end}
296
+
297
+ {pmore}
298
+ You can pass suboptions not just to the iv command but to all stage regressions with a comma after the list of stages. Example:{break}
299
+ {cmd:reghdfe price (weight=length), absorb(turn) subopt(nocollin) stages(first, eform(exp(beta)) )}
300
+
301
+ {pmore}
302
+ By default all stages are saved (see {help estimates dir}).
303
+ The suboption {cmd:,nosave} will prevent that.
304
+ However, future {cmd:replay}s will only replay the iv regression.
305
+
306
+ {phang}
307
+ {opt ffirst}
308
+ compute and report first stage statistics ({help ivreg2##s_relevance:details}); requires the ivreg2 package.
309
+
310
+ {pmore}
311
+ These statistics will be saved on the {it:e(first)} matrix.
312
+ If the first-stage estimates are also saved (with the {cmd:stages()} option), the respective statistics will be copied to {cmd:e(first_*)}.
313
+
314
+ {phang}
315
+ {opth iv:suite(subcmd)}
316
+ allows the IV/2SLS regression to be run either using {opt ivregress} or {opt ivreg2}.
317
+
318
+ {pmore} {opt ivreg2} is the default, but needs to be installed for that option to work.
319
+
320
+ {marker opt_diagnostic}{...}
321
+ {dlgtab:Diagnostic}
322
+
323
+ {phang}
324
+ {opt v:erbose(#)} orders the command to print debugging information.
325
+
326
+ {pmore}
327
+ Possible values are 0 (none), 1 (some information), 2 (even more), 3 (adds dots for each iteration, and reports parsing details), 4 (adds details for every iteration step)
328
+
329
+ {pmore}
330
+ For debugging, the most useful value is 3. For simple status reports, set verbose to 1.
331
+
332
+ {phang}
333
+ {opt time:it} shows the elapsed time at different steps of the estimation. Most time is usually spent on three steps: map_precompute(), map_solve() and the regression step.
334
+
335
+ {marker opt_dof}{...}
336
+ {dlgtab:Degrees-of-Freedom Adjustments}
337
+
338
+ {phang}
339
+ {opt dof:adjustments(doflist)} selects how the degrees-of-freedom, as well as e(df_a), are adjusted due to the absorbed fixed effects.
340
+
341
+ {pmore}
342
+ Without any adjustment, we would assume that the degrees-of-freedom used by the fixed effects is equal to the count of all the fixed effects
343
+ (e.g. number of individuals + number of years in a typical panel).
344
+ However, in complex setups (e.g. fixed effects by individual, firm, job position, and year),
345
+ there may be a huge number of fixed effects collinear with each other, so we want to adjust for that.
346
+
347
+ {pmore}
348
+ Note: changing the default option is rarely needed, except in benchmarks, and to obtain a marginal speed-up by excluding the {opt pair:wise} option.
349
+
350
+ {pmore}
351
+ {opt all} is the default and almost always the best alternative. It is equivalent to {opt dof(pairwise clusters continuous)}
352
+
353
+ {pmore}
354
+ {opt none} assumes no collinearity across the fixed effects (i.e. no redundant fixed effects). This is overtly conservative, although it is the faster method by virtue of not doing anything.
355
+
356
+ {pmore}
357
+ {opt first:pair} will exactly identify the number of collinear fixed effects across the first two sets of fixed effects
358
+ (i.e. the first absvar and the second absvar).
359
+ The algorithm used for this is described in Abowd et al (1999), and relies on results from graph theory
360
+ (finding the number of connected sub-graphs in a bipartite graph).
361
+ It will not do anything for the third and subsequent sets of fixed effects.
362
+
363
+ {pmore}
364
+ For more than two sets of fixed effects, there are no known results that provide exact degrees-of-freedom as in the case above.
365
+ One solution is to ignore subsequent fixed effects (and thus overestimate e(df_a) and underestimate the degrees-of-freedom).
366
+ Another solution, described below, applies the algorithm between pairs of fixed effects to obtain a better (but not exact) estimate:
367
+
368
+ {pmore}
369
+ {opt pair:wise} applies the aforementioned connected-subgraphs algorithm between pairs of fixed effects.
370
+ For instance, if there are four sets of FEs, the first dimension will usually have no redundant coefficients (i.e. e(M1)==1), since we are running the model without a constant.
371
+ For the second FE, the number of connected subgraphs with respect to the first FE will provide an exact estimate of the degrees-of-freedom lost, e(M2).
372
+
373
+ {pmore}
374
+ For the third FE, we do not know exactly.
375
+ However, we can compute the number of connected subgraphs between the first and third {it:G(1,3)},
376
+ and second and third {it:G(2,3)} fixed effects, and choose the higher of those as the closest estimate for e(M3).
377
+ For the fourth FE, we compute {it:G(1,4)}, {it:G(2,4)} and {it:G(3,4)} and again choose the highest for e(M4).
378
+
379
+ {pmore}
380
+ Finally, we compute e(df_a) = e(K1) - e(M1) + e(K2) - e(M2) + e(K3) - e(M3) + e(K4) - e(M4);
381
+ where e(K#) is the number of levels or dimensions for the #-th fixed effect (e.g. number of individuals or years).
382
+ Note that e(M3) and e(M4) are only conservative estimates and thus we will usually be overestimating the standard errors. However, given the sizes of the datasets typically used with reghdfe, the difference should be small.
383
+
384
+ {pmore}
385
+ Since the gain from {opt pair:wise} is usually {it:minuscule} for large datasets, and the computation is expensive, it may be a good practice to exclude this option for speedups.
386
+
387
+ {pmore}
388
+ {opt cl:usters}
389
+ will check if a fixed effect is nested within a {it:clustervar}.
390
+ In that case, it will set e(K#)==e(M#) and no degrees-of-freedom will be lost due to this fixed effect.
391
+ The rationale is that we are already assuming that the number of effective observations is the number of cluster levels.
392
+ This is the same adjustment that {cmd:xtreg, fe} does, but {cmd:areg} does not use it.
393
+
394
+ {pmore}
395
+ {opt cont:inuous}
396
+ Fixed effects with continuous interactions (i.e. individual slopes, instead of individual intercepts) are dealt with differently.
397
+ In an i.categorical#c.continuous interaction, we will do one check: we count the number of categories where c.continuous is always zero.
398
+ In an i.categorical##c.continuous interaction, we do the above check but replace zero for any particular constant.
399
+ In the case where continuous is constant for a level of categorical, we know it is collinear with the intercept, so we adjust for it.
400
+
401
+ {pmore}
402
+ Additional methods, such as {opt bootstrap} are also possible but not yet implemented.
403
+ Some preliminary simulations done by the author showed a very poor convergence of this method.
404
+
405
+ {phang}
406
+ {opth groupv:ar(newvar)} name of the new variable that will contain the first mobility group.
407
+ Requires {opt pair:wise}, {opt first:pair}, or the default {opt all}.
408
+
409
+ {marker opt_speedup}{...}
410
+ {dlgtab:Speeding Up Estimation}
411
+
412
+ {phang}
413
+ {cmd:reghdfe} {varlist} {ifin}{cmd:,} {opt a:bsorb(absvars)} {cmd:save(cache)} [{it:options}]
414
+
415
+ {pmore}
416
+ This will transform {it:varlist}, absorbing the fixed effects indicated by {it:absvars}.
417
+ It is useful when running a series of alternative specifications with common variables, as the variables will only be transformed once instead of every time a regression is run.
418
+
419
+ {pmore}
420
+ It replaces the current dataset, so it is a good idea to precede it with a {help preserve} command
421
+
422
+ {pmore}
423
+ To keep additional (untransformed) variables in the new dataset, use the {opth keep(varlist)} suboption.
424
+
425
+ {phang}
426
+ {cmd:cache(use)} is used when running reghdfe after a {it:save(cache)} operation. Both the {it:absorb()} and {it:vce()} options must be the same as when the cache was created (the latter because the degrees of freedom were computed at that point).
427
+
428
+ {phang}
429
+ {cmd:cache(clear)} will delete the Mata objects created by {it:reghdfe} and kept in memory after the {it:save(cache)} operation. These objects may consume a lot of memory, so it is a good idea to clean up the cache. Additionally, if you previously specified {it:preserve}, it may be a good time to {it:restore}.
430
+
431
+ {pmore}Example:{p_end}
432
+ {phang2}{cmd:. sysuse auto}{p_end}
433
+ {phang2}{cmd:. preserve}{p_end}
434
+ {phang2}{cmd:.}{p_end}
435
+ {phang2}{cmd:. * Save the cache}{p_end}
436
+ {phang2}{cmd:. reghdfe price weight length, a(turn rep) vce(turn) cache(save, keep(foreign))}{p_end}
437
+ {phang2}{cmd:.}{p_end}
438
+ {phang2}{cmd:. * Run regressions}{p_end}
439
+ {phang2}{cmd:. reghdfe price weight, a(turn rep) cache(use)}{p_end}
440
+ {phang2}{cmd:. reghdfe price length, a(turn rep) cache(use)}{p_end}
441
+ {phang2}{cmd:.}{p_end}
442
+ {phang2}{cmd:. * Clean up}{p_end}
443
+ {phang2}{cmd:. reghdfe, cache(clear)}{p_end}
444
+ {phang2}{cmd:. restore}{p_end}
445
+
446
+ {phang}
447
+ {opt fast} avoids saving {it:e(sample)} into the regression.
448
+ Since saving the variable only involves copying a Mata vector, the speedup is currently quite small.
449
+ Future versions of reghdfe may change this as features are added.
450
+
451
+ {pmore}
452
+ Note that {opt fast} will be disabled when adding variables to the dataset (i.e. when saving residuals, fixed effects, or mobility groups), and is incompatible with most postestimation commands.
453
+
454
+ {pmore}
455
+ If you wish to use {opt fast} while reporting {cmd:estat summarize}, see the {opt summarize} option.
456
+
457
+ {marker opt_optimization}{...}
458
+ {dlgtab:Optimization}
459
+
460
+ {phang}
461
+ {opth tol:erance(#)} specifies the tolerance criterion for convergence; default is {cmd:tolerance(1e-8)}
462
+
463
+ {pmore}
464
+ Note that for tolerances beyond 1e-14, the limits of the {it:double} precision are reached and the results will most likely not converge.
465
+
466
+ {pmore}
467
+ At the other end, if the tolerance is not tight enough, the regression may not identify perfectly collinear regressors. However, those cases can be easily spotted due to their extremely high standard errors.
468
+
469
+ {pmore}
470
+ Warning: when absorbing heterogeneous slopes without the accompanying heterogeneous intercepts, convergence is quite poor and a tight tolerance is strongly suggested (i.e. higher than the default). In other words, an absvar of {it:var1##c.var2} converges easily, but an absvar of {it:var1#c.var2} will converge slowly and may require a tighter tolerance.
471
+
472
+ {phang}
473
+ {opth maxit:erations(#)}
474
+ specifies the maximum number of iterations; the default is {cmd:maxiterations(10000)}; set it to missing ({cmd:.}) to run forever until convergence.
475
+
476
+ {phang}
477
+ {opth pool:size(#)}
478
+ Number of variables that are {it:pooled together} into a matrix that will then be transformed.
479
+ The default is to pool variables in groups of 5. Larger groups are faster with more than one processor, but may cause out-of-memory errors. In that case, set poolsize to 1.
480
+
481
+ {phang}
482
+ {it:Advanced options:}
483
+
484
+ {phang}
485
+ {opt acceleration(str)} allows for different acceleration techniques, from the simplest case of
486
+ no acceleration ({opt no:ne}), to steep descent ({opt st:eep_descent} or {opt sd}), Aitken ({opt a:itken}),
487
+ and finally Conjugate Gradient ({opt co:njugate_gradient} or {opt cg}).
488
+
489
+ {pmore}
490
+ Note: Each acceleration is just a plug-in Mata function, so a larger number of acceleration techniques are available, albeit undocumented (and slower).
491
+
492
+ {phang}
493
+ {opt transf:orm(str)} allows for different "alternating projection" transforms. The classical transform is Kaczmarz ({opt kac:zmarz}), and more stable alternatives are Cimmino ({opt cim:mino}) and Symmetric Kaczmarz ({opt sym:metric_kaczmarz})
494
+
495
+ {pmore}
496
+ Note: Each transform is just a plug-in Mata function, so a larger number of transforms are available, albeit undocumented (and slower).
497
+
498
+ {pmore}
499
+ Note: The default acceleration is Conjugate Gradient and the default transform is Symmetric Kaczmarz. Be wary that different accelerations often work better with certain transforms. For instance, do not use conjugate gradient with plain Kaczmarz, as it will not converge.
500
+
501
+ {phang}
502
+ {opt precondition} {it:(currently disabled)}
503
+
504
+ {marker opt_reporting}{...}
505
+ {dlgtab:Reporting}
506
+
507
+ {phang}
508
+ {opt l:evel(#)} sets confidence level; default is {cmd:level(95)}
509
+
510
+ {marker display_options}{...}
511
+ {phang}
512
+ {it:display_options}:
513
+ {opt noomit:ted},
514
+ {opt vsquish},
515
+ {opt noempty:cells},
516
+ {opt base:levels},
517
+ {opt allbase:levels},
518
+ {opt nofvlabel},
519
+ {opt fvwrap(#)},
520
+ {opt fvwrapon(style)},
521
+ {opth cformat(%fmt)},
522
+ {opt pformat(%fmt)},
523
+ {opt sformat(%fmt)}, and
524
+ {opt nolstretch};
525
+ see {helpb estimation options##display_options:[R] estimation options}.
526
+ {p_end}
527
+
528
+
529
+ {marker postestimation}{...}
530
+ {title:Postestimation Syntax}
531
+
532
+ Only {cmd:estat summarize}, {cmd:predict} and {cmd:test} are currently supported and tested.
533
+
534
+ {p 8 13 2}
535
+ {cmd:estat summarize}
536
+ {p_end}{col 23}Summarizes {it:depvar} and the variables described in {it:_b} (i.e. not the excluded instruments)
537
+
538
+ {p 8 16 2}
539
+ {cmd:predict}
540
+ {newvar}
541
+ {ifin}
542
+ [{cmd:,} {it:statistic}]
543
+ {p_end}{col 23}May require you to previously save the fixed effects (except for option {opt xb}).
544
+ {col 23}To see how, see the details of the {help reghdfe##absvar:absorb} option
545
+ {col 23}Equation: y = xb + d_absorbvars + e
546
+
547
+ {synoptset 20 tabbed}{...}
548
+ {synopthdr:statistic}
549
+ {synoptline}
550
+ {syntab :Main}
551
+ {p2coldent: {opt xb}}xb fitted values; the default{p_end}
552
+ {p2coldent: {opt xbd}}xb + d_absorbvars{p_end}
553
+ {p2coldent: {opt d}}d_absorbvars{p_end}
554
+ {p2coldent: {opt r:esiduals}}residual{p_end}
555
+ {p2coldent: {opt sc:ore}}score; equivalent to {opt residuals}{p_end}
556
+ {p2coldent: {opt stdp}}standard error of the prediction (of the xb component){p_end}
557
+ {synoptline}
558
+ {p2colreset}{...}
559
+ {p 4 6 2}although {cmd:predict} {help data_types:type} {help newvar} is allowed,
560
+ the resulting variable will always be of type {it:double}.{p_end}
561
+
562
+
563
+ {col 8}{cmd:test}{col 23}Performs significance test on the parameters, see the {help test:stata help}
564
+
565
+ {col 8}{cmd:suest}{col 23}Do not use {cmd:suest}. It will run, but the results will be incorrect. See workaround below
566
+
567
+ {pmore}If you want to perform tests that are usually run with {cmd:suest},
568
+ such as non-nested models, tests using alternative specifications of the variables,
569
+ or tests on different groups, you can replicate it manually, as described
570
+ {browse "http://www.stata.com/statalist/archive/2009-11/msg01485.html":here}.
571
+ {p_end}
572
+
573
+ {marker remarks}{...}
574
+
575
+ {title:Possible Pitfalls and Common Mistakes}
576
+
577
+ {p2col 8 12 12 2: 1.}(note: as of version 2.1, the constant is no longer reported) Ignore the constant; it doesn't tell you much. If you want to use descriptive stats, that's what the {opt sum:marize()} and {cmd:estat summ} commands are for.
578
+ Even better, use {opt noconstant} to drop it (although it's not really dropped as it never existed in the first place!){p_end}
579
+ {p2col 8 12 12 2: 2.}Think twice before saving the fixed effects. They are probably inconsistent / not identified and you will likely be using them wrong.{p_end}
580
+ {p2col 8 12 12 2: 3.}(note: as of version 3.0 singletons are dropped by default) It's good practice to drop singletons. {opt dropsi:ngleton} is your friend.{p_end}
581
+ {p2col 8 12 12 2: 4.}If you use {opt vce(robust)}, be sure that your {it:other} dimension is not "fixed" but grows with N, or your SEs will be wrong.{p_end}
582
+ {p2col 8 12 12 2: 5.}If you use {opt vce(cluster ...)}, check that your number of clusters is high enough (50+ is a rule of thumb). If not, you are making the SEs even worse!{p_end}
583
+ {p2col 8 12 12 2: 6.}The panel variables (absvars) should probably be nested within the clusters (clustervars) due to the within-panel correlation induced by the FEs.
584
+ (this is not the case for *all* the absvars, only those that are treated as growing as N grows){p_end}
585
+ {p2col 8 12 12 2: 7.}If you run analytic or probability weights,
586
+ you are responsible for ensuring that the weights stay
587
+ constant within each unit of a fixed effect (e.g. individual),
588
+ or that it is correct to allow varying-weights for that case.
589
+ {p_end}
590
+ {p2col 8 12 12 2: 8.}Be aware that adding several HDFEs is not a panacea.
591
+ The first limitation is that it only uses within variation (more than acceptable if you have a large enough dataset).
592
+ The second and subtler limitation occurs if the fixed effects are themselves outcomes of the variable of interest (as crazy as it sounds).
593
+ For instance, imagine a regression where we study the effect of past corporate fraud on future firm performance.
594
+ We add firm, CEO and time fixed-effects (standard practice). This introduces a serious flaw: whenever a fraud event is discovered,
595
+ i) future firm performance will suffer, and ii) a CEO turnover will likely occur.
596
+ Moreover, after fraud events, the new CEOs are usually specialized in dealing with the aftershocks of such events
597
+ (and are usually accountants or lawyers).
598
+ The fixed effects of these CEOs will also tend to be quite low, as they tend to manage firms with very risky outcomes.
599
+ Therefore, the regressor (fraud) affects the fixed effect (identity of the incoming CEO).
600
+ Adding particularly low CEO fixed effects will then overstate the performance of the firm,
601
+ and thus {it:understate} the negative effects of fraud on future firm performance.{p_end}
602
+
603
+ {title:Missing Features}
604
+
605
+ {phang}(If you are interested in discussing these or others, feel free to {help reghdfe##contact:contact me})
606
+
607
+ {phang}Code, medium term:
608
+
609
+ {p2col 8 12 12 2: -}Complete GT preconditioning (v4){p_end}
610
+ {p2col 8 12 12 2: -}Improve algorithm that recovers the fixed effects (v5){p_end}
611
+ {p2col 8 12 12 2: -}Improve statistics and tests related to the fixed effects (v5){p_end}
612
+ {p2col 8 12 12 2: -}Implement a -bootstrap- option in DoF estimation (v5){p_end}
613
+
614
+ {phang}Code, long term:
615
+
616
+ {p2col 8 12 12 2: -}The interaction with cont vars (i.a#c.b) may suffer from numerical accuracy issues, as we are dividing by a sum of squares{p_end}
617
+ {p2col 8 12 12 2: -}Calculate exact DoF adjustment for 3+ HDFEs (note: not a problem with cluster VCE when one FE is nested within the cluster){p_end}
618
+ {p2col 8 12 12 2: -}More postestimation commands (lincom? margins?){p_end}
619
+
620
+ {phang}Theory:
621
+
622
+ {p2col 8 12 12 2: -}Add a more thorough discussion on the possible identification issues{p_end}
623
+ {p2col 8 12 12 2: -}Find out a way to use reghdfe iteratively with CUE
624
+ (right now only OLS/2SLS/GMM2S/LIML give the exact same results){p_end}
625
+ {p2col 8 12 12 2: -}Not sure if I should add an F-test for the absvars in the vce(robust) and vce(cluster) cases.
626
+ Discussion on e.g. -areg- (methods and formulas) and textbooks suggests not;
627
+ on the other hand, there may be alternatives:
628
+ {it:{browse "http://www.socialsciences.manchester.ac.uk/disciplines/economics/research/discussionpapers/pdf/EDP-1124.pdf" :A Heteroskedasticity-Robust F-Test Statistic for Individual Effects}}{p_end}
629
+
630
+ {marker examples}{...}
631
+ {title:Examples}
632
+
633
+ {hline}
634
+ {pstd}Setup{p_end}
635
+ {phang2}{cmd:. sysuse auto}{p_end}
636
+
637
+ {pstd}Simple case - one fixed effect{p_end}
638
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78)}{p_end}
639
+ {hline}
640
+
641
+ {pstd}As above, but also compute clustered standard errors{p_end}
642
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78) vce(cluster rep78)}{p_end}
643
+ {hline}
644
+
645
+ {pstd}Two and three sets of fixed effects{p_end}
646
+ {phang2}{cmd:. webuse nlswork}{p_end}
647
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year)}{p_end}
648
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year occ)}{p_end}
649
+ {hline}
650
+
651
+ {title:Advanced examples}
652
+
653
+ {pstd}Save the FEs as variables{p_end}
654
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(FE1=idcode FE2=year)}{p_end}
655
+
656
+ {pstd}Report nested F-tests{p_end}
657
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year) nested}{p_end}
658
+
659
+ {pstd}Do AvgE instead of absorb() for one FE{p_end}
660
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year) avge(occ)}{p_end}
661
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa south , absorb(idcode year) avge(AvgByOCC=occ)}{p_end}
662
+
663
+ {pstd}Check that FE coefs are close to 1.0{p_end}
664
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa , absorb(idcode year) check}{p_end}
665
+
666
+ {pstd}Save first mobility group{p_end}
667
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa , absorb(idcode occ) group(mobility_occ)}{p_end}
668
+
669
+ {pstd}Factor interactions in the independent variables{p_end}
670
+ {phang2}{cmd:. reghdfe ln_w i.grade#i.age ttl_exp tenure not_smsa , absorb(idcode occ)}{p_end}
671
+
672
+ {pstd}Interactions in the absorbed variables (notice that only the {it:#} symbol is allowed){p_end}
673
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp tenure not_smsa , absorb(idcode#occ)}{p_end}
674
+
675
+ {pstd}Interactions in both the absorbed and AvgE variables (again, only the {it:#} symbol is allowed){p_end}
676
+ {phang2}{cmd:. reghdfe ln_w grade age ttl_exp not_smsa , absorb(idcode#occ) avge(tenure#occ)}{p_end}
677
+
678
+ {pstd}IV regression{p_end}
679
+ {phang2}{cmd:. sysuse auto}{p_end}
680
+ {phang2}{cmd:. reghdfe price weight (length=head), absorb(rep78)}{p_end}
681
+ {phang2}{cmd:. reghdfe price weight (length=head), absorb(rep78) first}{p_end}
682
+ {phang2}{cmd:. reghdfe price weight (length=head), absorb(rep78) ivsuite(ivregress)}{p_end}
683
+
684
+ {pstd}Factorial interactions{p_end}
685
+ {phang2}{cmd:. reghdfe price weight (length=head), absorb(rep78)}{p_end}
686
+ {phang2}{cmd:. reghdfe price weight length, absorb(rep78 turn##c.price)}{p_end}
687
+
688
+
689
+ {marker results}{...}
690
+ {title:Stored results}
691
+
692
+ {pstd}
693
+ {cmd:reghdfe} stores the following in {cmd:e()}:
694
+
695
+ {pstd}
696
+ {it:Note: it also keeps most e() results placed by the regression subcommands (ivreg2, ivregress)}
697
+
698
+ {synoptset 24 tabbed}{...}
699
+ {syntab:Scalars}
700
+ {synopt:{cmd:e(N)}}number of observations{p_end}
701
+ {synopt:{cmd:e(N_hdfe)}}number of absorbed fixed-effects{p_end}
702
+ {synopt:{cmd:e(tss)}}total sum of squares{p_end}
703
+ {synopt:{cmd:e(rss)}}residual sum of squares{p_end}
704
+ {synopt:{cmd:e(r2)}}R-squared{p_end}
705
+ {synopt:{cmd:e(r2_a)}}adjusted R-squared{p_end}
706
+ {synopt:{cmd:e(r2_within)}}Within R-squared{p_end}
707
+ {synopt:{cmd:e(r2_a_within)}}Adjusted Within R-squared{p_end}
708
+ {synopt:{cmd:e(df_a)}}degrees of freedom lost due to the fixed effects{p_end}
709
+ {synopt:{cmd:e(rmse)}}root mean squared error{p_end}
710
+ {synopt:{cmd:e(ll)}}log-likelihood{p_end}
711
+ {synopt:{cmd:e(ll_0)}}log-likelihood of fixed-effect-only regression{p_end}
712
+ {synopt:{cmd:e(F)}}F statistic{p_end}
713
+ {synopt:{cmd:e(F_absorb)}}F statistic for absorbed effect {it:note: currently disabled}{p_end}
714
+ {synopt:{cmd:e(rank)}}rank of {cmd:e(V)}{p_end}
715
+ {synopt:{cmd:e(N_clustervars)}}number of cluster variables{p_end}
716
+
717
+ {synopt:{cmd:e(clust}#{cmd:)}}number of clusters for the #th cluster variable{p_end}
718
+ {synopt:{cmd:e(N_clust)}}number of clusters; minimum of {it:e(clust#)}{p_end}
719
+
720
+ {synopt:{cmd:e(K}#{cmd:)}}Number of categories of the #th absorbed FE{p_end}
721
+ {synopt:{cmd:e(M}#{cmd:)}}Number of redundant categories of the #th absorbed FE{p_end}
722
+ {synopt:{cmd:e(mobility)}}Sum of all {cmd:e(M#)}{p_end}
723
+ {synopt:{cmd:e(df_m)}}model degrees of freedom{p_end}
724
+ {synopt:{cmd:e(df_r)}}residual degrees of freedom{p_end}
725
+
726
+ {synoptset 24 tabbed}{...}
727
+ {syntab:Macros}
728
+ {synopt:{cmd:e(cmd)}}{cmd:reghdfe}{p_end}
729
+ {synopt:{cmd:e(subcmd)}}either {cmd:regress}, {cmd:ivreg2} or {cmd:ivregress}{p_end}
730
+ {synopt:{cmd:e(model)}}{cmd:ols}, {cmd:iv}, {cmd:gmm2s}, {cmd:liml} or {cmd:cue}{p_end}
731
+ {synopt:{cmd:e(cmdline)}}command as typed{p_end}
732
+ {synopt:{cmd:e(dofmethod)}}dofmethod employed in the regression{p_end}
733
+ {synopt:{cmd:e(depvar)}}name of dependent variable{p_end}
734
+ {synopt:{cmd:e(indepvars)}}names of independent variables{p_end}
735
+ {synopt:{cmd:e(endogvars)}}names of endogenous right-hand-side variables{p_end}
736
+ {synopt:{cmd:e(instruments)}}names of excluded instruments{p_end}
737
+ {synopt:{cmd:e(absvars)}}name of the absorbed variables or interactions{p_end}
738
+ {synopt:{cmd:e(title)}}title in estimation output{p_end}
739
+ {synopt:{cmd:e(clustvar)}}name of cluster variable{p_end}
740
+ {synopt:{cmd:e(clustvar}#{cmd:)}}name of the #th cluster variable{p_end}
741
+ {synopt:{cmd:e(vce)}}{it:vcetype} specified in {cmd:vce()}{p_end}
742
+ {synopt:{cmd:e(vcetype)}}title used to label Std. Err.{p_end}
743
+ {synopt:{cmd:e(stage)}}stage within an IV-regression; only if {it:stages()} was used{p_end}
744
+ {synopt:{cmd:e(properties)}}{cmd:b V}{p_end}
745
+
746
+ {synoptset 24 tabbed}{...}
747
+ {syntab:Matrices}
748
+ {synopt:{cmd:e(b)}}coefficient vector{p_end}
749
+ {synopt:{cmd:e(V)}}variance-covariance matrix of the estimators{p_end}
750
+
751
+ {synoptset 24 tabbed}{...}
752
+ {syntab:Functions}
753
+ {synopt:{cmd:e(sample)}}marks estimation sample{p_end}
754
+ {p2colreset}{...}
755
+
756
+ {marker contact}{...}
757
+ {title:Author}
758
+
759
+ {pstd}Sergio Correia{break}
760
+ Fuqua School of Business, Duke University{break}
761
+ Email: {browse "mailto:[email protected]":[email protected]}
762
+ {p_end}
763
+
764
+ {marker user_guide}{...}
765
+ {title:User Guide}
766
+
767
+ {pstd}
768
+ A copy of this help file, as well as a more in-depth user guide is in development and will be available at {browse "http://scorreia.com/reghdfe"}.{p_end}
769
+
770
+ {marker updates}{...}
771
+ {title:Latest Updates}
772
+
773
+ {pstd}
774
+ {cmd:reghdfe} is updated frequently, and upgrades or minor bug fixes may not be immediately available in SSC.
775
+ To check or contribute to the latest version of reghdfe, explore the
776
+ {browse "https://github.com/sergiocorreia/reghdfe":Github repository}.
777
+ Bugs or missing features can be discussed through email or at the {browse "https://github.com/sergiocorreia/reghdfe/issues":Github issue tracker}.{p_end}
778
+
779
+ {pstd}
780
+ To see your current version and installed dependencies, type {cmd:reghdfe, version}
781
+ {p_end}
782
+
783
+ {marker acknowledgements}{...}
784
+ {title:Acknowledgements}
785
+
786
+ {pstd}
787
+ This package wouldn't have existed without the invaluable feedback and contributions of Paulo Guimaraes, Amine Ouazad, Mark Schaffer and Kit Baum. Also invaluable are the great bug-spotting abilities of many users.{p_end}
788
+
789
+ {pstd}In addition, {it:reghdfe} is built upon important contributions from the Stata community:{p_end}
790
+
791
+ {phang}{browse "https://ideas.repec.org/c/boc/bocode/s457101.html":reg2hdfe}, from Paulo Guimaraes,
792
+ and {browse "https://ideas.repec.org/c/boc/bocode/s456942.html":a2reg} from Amine Ouazad,
793
+ were the inspiration and building blocks on which reghdfe was built.{p_end}
794
+
795
+ {phang}{browse "http://www.repec.org/bocode/i/ivreg2.html":ivreg2}, by Christopher F Baum, Mark E Schaffer and Steven Stillman, is the package used by default for instrumental-variable regression.{p_end}
796
+
797
+ {phang}{browse "https://ideas.repec.org/c/boc/bocode/s457689.html":avar} by Christopher F Baum and Mark E Schaffer, is the package used for estimating the HAC-robust standard errors of ols regressions.{p_end}
798
+
799
+ {phang}{browse "http://econpapers.repec.org/software/bocbocode/s456797.htm":tuples} by Joseph Luchman and Nicholas Cox, is used when computing standard errors with multi-way clustering (two or more clustering variables).{p_end}
800
+
801
+ {marker references}{...}
802
+ {title:References}
803
+
804
+ {p 0 0 2}
805
+ The algorithm underlying reghdfe is a generalization of the works by:
806
+
807
+ {phang}
808
+ Paulo Guimaraes and Pedro Portugal. "A Simple Feasible Alternative Procedure to Estimate
809
+ Models with High-Dimensional Fixed Effects".
810
+ {it:Stata Journal, 10(4), 628-649, 2010.}
811
+ {browse "http://www.stata-journal.com/article.html?article=st0212":[link]}
812
+ {p_end}
813
+
814
+ {phang}
815
+ Simen Gaure. "OLS with Multiple High Dimensional Category Dummies".
816
+ {it:Memorandum 14/2010, Oslo University, Department of Economics, 2010.}
817
+ {browse "https://ideas.repec.org/p/hhs/osloec/2010_014.html":[link]}
818
+ {p_end}
819
+
820
+ {p 0 0 2}
821
+ It addresses many of the limitations of previous works, such as possible lack of convergence, arbitrarily slow convergence times,
822
+ and being limited to only two or three sets of fixed effects (for the first paper).
823
+ The paper explaining the specifics of the algorithm is a work-in-progress and available upon request.
824
+
825
+ {p 0 0 0}
826
+ If you use this program in your research, please cite either
827
+ the {browse "https://ideas.repec.org/c/boc/bocode/s457874.html":REPEC entry}
828
+ or the aforementioned papers.{p_end}
829
+
830
+ {title:Additional References}
831
+
832
+ {p 0 0 0}
833
+ For details on the Aitken acceleration technique employed, please see "method 3" as described by:
834
+
835
+ {phang}
836
+ Macleod, Allan J. "Acceleration of vector sequences by multi-dimensional Delta-2 methods."
837
+ {it:Communications in Applied Numerical Methods 2.4 (1986): 385-392.}
838
+ {p_end}
839
+
840
+ {p 0 0 0}
841
+ For the rationale behind interacting fixed effects with continuous variables, see:
842
+
843
+ {phang}
844
+ Duflo, Esther. "The medium run effects of educational expansion: Evidence from a large school construction program in Indonesia."
845
+ {it:Journal of Development Economics 74.1 (2004): 163-197.}{browse "http://www.sciencedirect.com/science/article/pii/S0304387803001846": [link]}
846
+ {p_end}
847
+
848
+ {p 0 0 0}
849
+ Also see:
850
+
851
+ {phang}Abowd, J. M., R. H. Creecy, and F. Kramarz 2002.
852
+ Computing person and firm effects using linked longitudinal employer-employee data.
853
+ {it:Census Bureau Technical Paper TP-2002-06.}
854
+ {p_end}
855
+
856
+ {phang}
857
+ Cameron, A. Colin & Gelbach, Jonah B. & Miller, Douglas L., 2011.
858
+ "Robust Inference With Multiway Clustering,"
859
+ {it:Journal of Business & Economic Statistics, American Statistical Association, vol. 29(2), pages 238-249.}
860
+ {p_end}
861
+
862
+ {phang}
863
+ Gormley, T. & Matsa, D. 2014.
864
+ "Common errors: How to (and not to) control for unobserved heterogeneity."
865
+ {it:The Review of Financial Studies, vol. 27(2), pages 617-661.}
866
+ {p_end}
867
+
868
+ {phang}
869
+ Mittag, N. 2012.
870
+ "New methods to estimate models with large sets of fixed effects with an application to matched employer-employee data from Germany."
871
+ {it:{browse "http://doku.iab.de/fdz/reporte/2012/MR_01-12_EN.pdf":FDZ-Methodenreport 02/2012}.}
872
+ {p_end}
30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_estat.ado ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* estat subcommands after an old-reghdfe estimation: -summarize- and -vce-
* Returns whatever the underlying estat routine leaves in r()
program reghdfe_old_estat, rclass
	version `=cond(c(version)<14, c(version), 13)'
	* Only valid when reghdfe estimates are active
	if "`e(cmd)'" != "reghdfe" {
		error 301
	}

	* Split off the subcommand; -gettoken- leaves the remainder
	* (varlist, comma, options) back in `0'
	gettoken key 0 : 0, parse(", ")
	local lkey = length(`"`key'"')

	* Accept abbreviations of -summarize- down to -su-
	if `"`key'"' == substr("summarize",1,max(2,`lkey')) {

		* BUGFIX: the old code ran -local 0 `rest'- here, but `rest' was
		* never defined, so the user's varlist and options were silently
		* discarded; the remainder already lives in `0' (set by -gettoken-)
		syntax [anything] , [*] [noheader] // -noheader- gets silently ignored b/c it will always be -on-

		if ("`anything'"=="") {
			* By default include the instruments
			local anything `e(depvar)' `e(indepvars)' `e(endogvars)' `e(instruments)'
		}

		* Need to use -noheader- as a workaround to the bug in -estat_summ-
		estat_summ `anything' , `options' noheader

	}
	else if `"`key'"' == "vce" {
		vce `0'
	}
	else {
		di as error `"invalid subcommand `key'"'
		exit 321
	}
	return add // ?
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_footnote.ado ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// -------------------------------------------------------------
// Display Regression Footnote
// -------------------------------------------------------------

* Prints the footnote under the reghdfe coefficient table:
* (i) for plain OLS with unadjusted VCE, a joint F-test of the absorbed
*     fixed effects and per-FE nested F-tests with incremental R-squared;
* (ii) always, a table detailing absorbed degrees of freedom per FE.
* Reads everything from the e() results left behind by reghdfe.
program reghdfe_old_footnote
syntax [, linesize(int 79)]

	* First-column width; s(width_col1) is left in s() by the coef table
	local skip1 = max(`s(width_col1)'-1, 12) // works with both _coef_table, ivreg2 and ivregress

	* The F-test block is only shown for OLS with unadjusted/ols VCE
	if ("`e(model)'"=="ols" & inlist("`e(vce)'", "unadjusted", "ols")) {
		local dfa1 = e(df_a) + 1
		local todisp `"F(`=e(df_a)-1', `e(df_r)') = "'
		local skip3 = max(23-length(`"`todisp'"')-2,0)
		local skip2 = max(14-length(`"`dfa1'"')-2,0)
		local skip0 `skip1'

		* Widen the first column to fit the longest absvar label
		foreach fe in `e(extended_absvars)' {
			local skip1 = max(`skip1', length("`fe'"))
		}

		* Joint F-test of all absorbed fixed effects
		di as text %`skip0's "Absorbed" " {c |}" ///
			_skip(`skip3') `"`todisp'"' ///
			as res %10.3f e(F_absorb) %8.3f fprob(e(df_a),e(df_r),e(F_absorb)) ///
			as text _skip(13) `"(Joint test)"'

		* Col width
		local WX = `skip1' + 1

		* Show by-fe FStats
		* Relevant macros: NUM_FE, FE1, .., FE_TARGET1, .., FE_VARLIST
		local r2 = 1 - e(rss0)/e(tss)
		local r2_report %4.3f `r2'
		forval i = 1/`e(N_hdfe_extended)' {
			local fe : word `i' of `e(extended_absvars)'
			* Only show FEs for which reghdfe stored a nested F-stat
			if (e(F_absorb`i')<.) {
				di as text %`skip1's "`fe'" " {c |}" _continue

				* The first FE absorbs one extra df (the constant)
				local df_a_i = e(df_a`i') - (`i'==1)
				local df_r_i = e(df_r`i')
				local todisp `"F(`df_a_i', `df_r_i') = "'
				local skip3 = max(23-length(`"`todisp'"')-2,0)
				di as text _skip(`skip3') `"`todisp'"' _continue

				di as res %10.3f e(F_absorb`i') %8.3f fprob(e(df_a`i'),e(df_r`i'),e(F_absorb`i')) _continue
				di as text _skip(12) `"(Nested test)"'

				* Incremental R-squared as each FE is added
				local r2 = 1 - e(rss`i')/e(tss)
				local r2_report `r2_report' " -> " %4.3f `r2'
				*local cats = e(K`i') - e(M`i')
				*local data = "`e(K`i')' categories, `e(M`i')' collinear, `cats' unique"
				*local skip = 62 - length("`data'")
				*di as text _skip(`skip') `"(`data')"'
			}
		}
		di as text "{hline `=1+`skip0''}{c BT}{hline 64}"
		if (e(rss0)<.) di as text " R-squared as we add HDFEs: " `r2_report'
	} // regress-unadjusted specific
	else {
		* No F-test block: still compute the column width for the DoF table
		foreach fe in `e(absvars)' {
			local skip1 = max(`skip1', length("`fe'"))
		}
		local WX = `skip1' + 1
	}

	* Show category data
	di as text
	di as text "Absorbed degrees of freedom:"
	di as text "{hline `WX'}{c TT}{hline 49}{c TRC}" // {c TT}{hline 14}"
	di as text %`skip1's "Absorbed FE" " {c |}" ///
		%13s "Num. Coefs." ///
		%16s "= Categories" ///
		%15s "- Redundant" ///
		" {c |} " _continue

	// if ("`e(corr1)'"!="") di as text %13s "Corr. w/xb" _continue
	di as text _n "{hline `WX'}{c +}{hline 49}{c RT}" // {c +}{hline 14}"

	local i 0
	local explain_exact 0
	local explain_nested 0

	* One row per (extended) absvar: K = categories, M = redundant
	forval i = 1/`e(N_hdfe_extended)' {
		local fe : word `i' of `e(extended_absvars)'


		di as text %`skip1's "`fe'" " {c |}" _continue
		local numcoefs = e(K`i') - e(M`i')
		assert `numcoefs'<. & `numcoefs'>=0
		* "?" marks FEs whose redundant-parameter count is only a lower bound;
		* "*" marks FEs nested within the cluster variable
		local note = cond(`e(M`i'_exact)'==0, "?", " ")
		if ("`note'"=="?") {
			local explain_exact 1
		}
		else if (`e(M`i'_nested)'==1) {
			local note *
			local explain_nested 1
		}

		di as text %13s "`numcoefs'" _continue
		di as text %16s "`e(K`i')'" _continue

		di as text %15s "`e(M`i')'" _continue
		di as text %2s "`note'" " {c |} " _continue
		//if ("`e(corr`i')'"!="") {
		//	di as text %13.4f `e(corr`i')' _continue
		//}
		di
	}
	di as text "{hline `WX'}{c BT}{hline 49}{c BRC}" // {c BT}{hline 14}"
	if (`explain_exact') di as text "? = number of redundant parameters may be higher"
	if (`explain_nested') di as text `"* = fixed effect nested within cluster; treated as redundant for DoF computation"'
	// di as text _skip(4) "Fixed effect indicators: " in ye "`e(absvars)'"

end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_old_p.ado ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* Predict after old reghdfe: xb, xbd, d (sum of FEs), residuals, scores, stdp.
* Requires the FEs to have been saved via absorb(NAME=i.var ...) for any
* option other than -xb- / -stdp- (it reconstructs d from e(equation_d)).
program define reghdfe_old_p
	* (Maybe refactor using _pred_se ??)

	* BUGFIX: the clip() expression must be evaluated; the old line
	* -local version `clip(...)'- lacked the "=" so the macro expansion
	* was invalid and `version' was never set
	local version = clip(`c(version)', 11.2, 13.1) // 11.2 minimum, 13+ preferred
	qui version `version'

	*if "`e(cmd)'" != "reghdfe" {
	*	error 301
	*}
	syntax anything [if] [in] , [XB XBD D Residuals SCores STDP]
	if (`"`scores'"' != "") {
		* -margins- sends a scores request; resolve the new varname(s)
		_score_spec `anything'
		local varlist `s(varlist)'
	}
	else {
		local 0 `anything'
		syntax newvarname // [if] [in] , [XB XBD D Residuals SCores]
	}

	local weight "[`e(wtype)'`e(wexp)']" // After -syntax-!!!
	* BUGFIX: with unweighted estimates the line above leaves the invalid
	* token "[]", which would break the -summarize- calls below; blank it
	if ("`weight'"=="[]") local weight

	* Exactly one prediction option must be in effect; default is xb
	local option `xb' `xbd' `d' `residuals' `scores' `stdp'
	if ("`option'"=="") local option xb // The default, as in -areg-
	local numoptions : word count `option'
	if (`numoptions'!=1) {
		di as error "(predict reghdfe) syntax error; specify one and only one option"
		exit 112
	}
	if ("`option'"=="scores") local option residuals

	local fixed_effects "`e(absvars)'"

	* Intercept stdp call
	if ("`option'"=="stdp") {
		_predict double `varlist' `if' `in', stdp
		* la var `varlist' "STDP"
		exit
	}

	* We need to have saved FEs and AvgEs for every option except -xb-
	if ("`option'"!="xb") {

		* Only estimate using e(sample) except when computing xb (when we don't need -d- and can predict out-of-sample)
		if (`"`if'"'!="") {
			local if `if' & e(sample)==1
		}
		else {
			local if "if e(sample)==1"
		}

		* Construct -d- (sum of FEs)
		tempvar d
		if ("`e(equation_d)'"=="") {
			* BUGFIX: message used to interpolate the undefined macro `g'
			di as error "In order to predict, all the FEs need to be saved with the absorb option (at least one was not)"
			di as error "For instance, instead of {it:absorb(i.year i.firm)}, set absorb(FE_YEAR=i.year FE_FIRM=i.firm)"
			exit 112
		}
		qui gen double `d' = `e(equation_d)' `if' `in'

	} // Finished creating `d' if needed

	tempvar xb // XB will eventually contain XBD and RESID if that's the output
	_predict double `xb' `if' `in', xb

	if ("`option'"=="xb") {
		rename `xb' `varlist'
	}
	else {
		* Make residual have mean zero (and add that to -d-)
		* mean = E[y] - E[xb] - E[d], i.e. the constant missing from d
		su `e(depvar)' `if' `in' `weight', mean
		local mean = r(mean)
		su `xb' `if' `in' `weight', mean
		local mean = `mean' - r(mean)
		su `d' `if' `in' `weight', mean
		local mean = `mean' - r(mean)
		qui replace `d' = `d' + `mean' `if' `in'

		if ("`option'"=="d") {
			rename `d' `varlist'
			la var `varlist' "d[`fixed_effects']"
		}
		else if ("`option'"=="xbd") {
			qui replace `xb' = `xb' + `d' `if' `in'
			rename `xb' `varlist'
			la var `varlist' "Xb + d[`fixed_effects']"
		}
		else if ("`option'"=="residuals") {
			qui replace `xb' = `e(depvar)' - `xb' - `d' `if' `in'
			rename `xb' `varlist'
			la var `varlist' "Residuals"
		}
		else {
			error 112
		}
	}

	* Use the same display format as the (possibly TS-operated) depvar
	fvrevar `e(depvar)', list
	local format : format `r(varlist)'
	format `format' `varlist'
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_p.ado ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* Predict after reghdfe: xb, stdp, residuals, d (sum of FEs), xbd, dresiduals.
* Every option except xb/stdp reconstructs its answer from the stored
* residual e(resid), which requires reghdfe to have been run with -resid-.
program define reghdfe_p, rclass
	* Note: we IGNORE typlist and generate the newvar as double
	* Note: e(resid) is missing outside of e(sample), so we don't need to condition on e(sample)

	* HACK: Intersect -score- and replace with -residuals-
	cap syntax anything [if] [in], SCore
	loc was_score = !c(rc)
	if (`was_score') {
		* Call _score_spec to get newvarname; discard type
		* - This resolves wildcards that -margins- sends to predict (e.g. var* -> var1)
		* - Do we really need to pass it `if' and `in' ?
		_score_spec `anything', score
		loc 0 `s(varlist)' `if' `in' , residuals
	}

	syntax newvarname [if] [in] [, XB STDP Residuals D XBD DResiduals]

	* Ensure there is only one option
	opts_exclusive "`xb' `stdp' `residuals' `d' `xbd' `dresiduals'"

	* Default option is xb
	* (the "placeholder" trick: if any real option was given, adding one more
	*  word makes opts_exclusive fail, so c(rc)==0 means NO option was given)
	cap opts_exclusive "`xb' `stdp' `residuals' `d' `xbd' `dresiduals' placeholder"
	if (!c(rc)) {
		di as text "(option xb assumed; fitted values)"
		loc xb "xb"
	}

	local fixed_effects "`e(absvars)'"

	* Except for xb and stdp, we need the previously computed residuals
	if ("`xb'" == "" & "`stdp'" == "") {
		_assert ("`e(resid)'" != ""), msg("you must add the {bf:resid} option to reghdfe before running this prediction")
		conf numeric var `e(resid)', exact
	}

	if ("`xb'" != "" | "`stdp'" != "") {
		* xb: normal treatment
		PredictXB `varlist' `if' `in', `xb' `stdp'
	}
	else if ("`residuals'" != "") {
		* resid: just return the preexisting variable
		gen double `varlist' = `e(resid)' `if' `in'
		la var `varlist' "Residuals"
		* Let -margins- pick the scores up from r()
		if (`was_score') return local scorevars `varlist'
	}
	else if ("`d'" != "") {
		* d: y - xb - resid
		tempvar xb
		PredictXB `xb' `if' `in', xb
		gen double `varlist' = `e(depvar)' - `xb' - `e(resid)' `if' `in'
		la var `varlist' "d[`fixed_effects']"
	}
	else if ("`xbd'" != "") {
		* xbd: y - resid
		gen double `varlist' = `e(depvar)' - `e(resid)' `if' `in'
		la var `varlist' "Xb + d[`fixed_effects']"
	}
	else if ("`dresiduals'" != "") {
		* dresid: y - xb
		tempvar xb
		PredictXB `xb' `if' `in', xb
		gen double `varlist' = `e(depvar)' - `xb' `if' `in'
	}
	else {
		* Unreachable given the syntax/opts_exclusive checks above
		error 100
	}
end
68
+
69
* Thin wrapper over -_predict- that tolerates models with no regressors:
* when e(b) does not exist, _predict would fail, so the new variable is
* simply filled with zeros over the requested sample instead.
program PredictXB
	syntax newvarname [if] [in], [*]
	cap matrix list e(b) // if there are no regressors, _predict fails
	if (!c(rc)) {
		_predict double `varlist' `if' `in', `options'
	}
	else {
		gen double `varlist' = 0 `if' `in'
	}
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_parse.ado ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* This program should only be called by fixed_effects()
* Parses absorb() plus every reghdfe option and returns the results in s()
program reghdfe_parse, sclass

	* Parse absorb
	cap drop __hdfe* // destructive!
	ms_parse_absvars `0'
	loc extended_absvars `"`s(extended_absvars)'"'
	* Strip the embedded quotes so -markout- below gets a plain varlist
	mata: st_local("unquoted_absvars", subinstr(st_global("s(absvars)"), `"""', ""))
	loc 0, `s(options)'
	loc G = `s(G)'

	* Main syntax
	#d;
	syntax, [

		/* Model */
		RESiduals(name) RESiduals2 /* use _reghdfe_resid */

		/* Optimization (defaults are handled within Mata) */
		TOLerance(real -1)
		MAXITerations(real -1)
		ALGorithm(string) /* map gt lsmr cg */
		TRAnsform(string)
		ACCELeration(string)
		SLOPEmethod(string)
		PRUNE
		PRECONDition /* always compute LSMR preconditioner */

		/* Memory usage (also see -compact- option) */
		POOLsize(integer 0) /* Process variables in batches of # ; 0 turns it off */

		/* Degrees-of-freedom Adjustments */
		DOFadjustments(string)
		GROUPVar(name) /* var with the first connected group between FEs */

		CONDition // Report finite condition number; SLOW!
		RRE(varname) // Report relative residual error
		noCONstant // Report constant; enabled by default as otherwise -margins- fails

		/* Duplicated options */
		KEEPSINgletons
		Verbose(numlist min=1 max=1 >=-1 <=5 integer)

		] [*] /* capture display options, etc. */
		;
	#d cr

	if ("`keepsingletons'"!="") sreturn loc drop_singletons = 0
	if ("`verbose'"!="") sreturn loc verbose = `verbose'
	sreturn loc report_constant = "`constant'" != "noconstant"

	* Pass through whatever remains (display options, etc.)
	sreturn loc options `"`options'"'

	* Mark out observations with missings in the absorbed variables
	assert "$reghdfe_touse" != ""
	cap conf var $reghdfe_touse
	if (c(rc)) gen byte $reghdfe_touse = 1
	markout $reghdfe_touse `unquoted_absvars', strok

	* Optimization
	loc maxiterations = int(`maxiterations')
	if (`tolerance' > 0) sreturn loc tolerance = `tolerance'
	if (`maxiterations' > 0) sreturn loc maxiter = `maxiterations'

	* Transforms: allow abbreviations (cim --> cimmino)
	if ("`transform'" != "") {
		loc transform = lower("`transform'")
		loc valid_transforms cimmino kaczmarz symmetric_kaczmarz rand_kaczmarz
		foreach x of local valid_transforms {
			if (strpos("`x'", "`transform'")==1) loc transform `x'
		}
		_assert (`: list transform in valid_transforms'), msg("invalid transform: `transform'")
		sreturn loc transform "`transform'"
	}

	* Accelerations: allow abbreviations and the aliases cg/sd/off
	if ("`acceleration'" != "") {
		loc acceleration = lower("`acceleration'")
		if ("`acceleration'"=="cg") loc acceleration conjugate_gradient
		if ("`acceleration'"=="sd") loc acceleration steepest_descent
		if ("`acceleration'"=="off") loc acceleration none
		loc valid_accelerations conjugate_gradient steepest_descent aitken none hybrid lsmr
		foreach x of local valid_accelerations {
			if (strpos("`x'", "`acceleration'")==1) loc acceleration `x'
		}
		_assert (`: list acceleration in valid_accelerations'), msg("invalid acceleration: `acceleration'")
		sreturn loc acceleration "`acceleration'"
	}

	* Disable prune of degree-1 edges
	if ("`prune'" == "prune") sreturn loc prune = 1

	* Parse DoF Adjustments
	if ("`dofadjustments'"=="") local dofadjustments all
	loc 0 , `dofadjustments'
	syntax, [ALL NONE] [FIRSTpair PAIRwise] [CLusters] [CONTinuous]
	local opts `pairwise' `firstpair' `clusters' `continuous'
	local n : word count `opts'
	* BUGFIX: this read -`opt'- (an undefined macro) so `first_opt' was
	* always empty and the all/none-vs-specific checks below never fired
	local first_opt : word 1 of `opts'
	opts_exclusive "`all' `none'" dofadjustments
	opts_exclusive "`pairwise' `firstpair'" dofadjustments
	opts_exclusive "`all' `first_opt'" dofadjustments
	opts_exclusive "`none' `first_opt'" dofadjustments
	if ("`none'" != "") local opts
	if ("`all'" != "") local opts pairwise clusters continuous
	//if (`: list posof "three" in opts') {
	//	cap findfile group3hdfe.ado
	//	_assert !_rc , msg("error: -group3hdfe- not installed, please run {stata ssc install group3hdfe}")
	//}
	if ("`groupvar'"!="") conf new var `groupvar'
	sreturn local dofadjustments "`opts'"
	* BUGFIX: return the groupvar() option parsed above; the old code read
	* s(groupvar), which nothing sets, so the option was silently dropped
	sreturn loc groupvar "`groupvar'"

	* Residuals
	if ("`residuals2'" != "") {
		_assert ("`residuals'" == ""), msg("residuals() syntax error")
		cap drop _reghdfe_resid // destructive!
		sreturn loc residuals _reghdfe_resid
	}
	else if ("`residuals'"!="") {
		conf new var `residuals'
		sreturn loc residuals `residuals'
	}

	* Misc
	if ("`condition'"!="") {
		_assert `G'==2, msg("Computing finite condition number requires two FEs")
		sreturn loc finite_condition 1
	}

	sreturn loc compute_rre = ("`rre'" != "")
	if ("`rre'" != "") {
		sreturn loc rre `rre'
	}

	* poolsize(0) (the default, i.e. "off") maps to missing = no batching
	if (`poolsize' < 1) loc poolsize .
	sreturn loc poolsize `poolsize'

	sreturn loc precondition = "`precondition'" != ""
end
30/replication_package/Adofiles/reghdfe_2019/reghdfe_projections.mata ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Code that partials out (demean) a specific fixed effect
2
+ mata:
3
+
4
// Group means of -y- over the levels of factor -f-.
// Returns an (f.num_levels x cols(y)) matrix of (possibly weighted) means.
// NOTE(review): like its siblings below, this assumes y is already sorted
// by f (it indexes panels through f.info) -- confirm at call sites.
`Variables' panelmean(`Variables' y,
                      `Factor' f)
{
	pointer(`Variable') Pw, Pcounts
	`Boolean' has_weights
	// "has_weights" may be absent from f.extra (asarray returns J(0,0,.)); treat absent as 0
	has_weights = asarray(f.extra, "has_weights") == J(0,0,.) ? 0 : asarray(f.extra, "has_weights")
	assert(has_weights==0 | has_weights==1)

	if (has_weights) {
		Pw = &asarray(f.extra, "weights")
		Pcounts = &asarray(f.extra, "weighted_counts")
		// editmissing() turns 0/0 into 0 when a level has zero total weight
		return(editmissing(`panelsum'(y, *Pw, f.info) :/ *Pcounts, 0))
	}
	else {
		return(`panelsum'(y, f.info) :/ f.counts)
	}
}
21
+
22
+
23
// Precomputes, for every level of factor f, the inverse of the (weighted,
// optionally demeaned) cross-product X'X of the slope variables stored in
// f.extra("x"). The L inverses are stacked vertically into an (L*K x K)
// matrix, so level i occupies rows K*(i-1)+1 .. K*i.
`Matrix' precompute_inv_xx(`Factor' f,
                           `Boolean' has_intercept)
{
	`Integer' i, L, K, offset
	`Variables' x, tmp_x
	`Variable' w, tmp_w
	`Matrix' xmeans, inv_xx
	`RowVector' tmp_xmeans
	`Matrix' tmp_inv_xx
	`Boolean' has_weights

	has_weights = asarray(f.extra, "has_weights")

	// x and w must be already sorted by the factor f
	x = asarray(f.extra, "x")
	L = f.num_levels
	K = cols(x)
	inv_xx = J(L * K, K, .)

	if (has_weights) w = asarray(f.extra, "weights")
	if (has_intercept) xmeans = asarray(f.extra, "xmeans")

	for (i = 1; i <= L; i++) {
		tmp_x = panelsubmatrix(x, i, f.info)
		tmp_w = has_weights ? panelsubmatrix(w, i, f.info) : 1
		if (has_intercept) {
			// With an intercept, demean x within the level before crossing
			tmp_xmeans = K > 1 ? xmeans[i, .] : xmeans[i]
			tmp_inv_xx = invsym(quadcrossdev(tmp_x, tmp_xmeans, tmp_w, tmp_x, tmp_xmeans))
		}
		else {
			tmp_inv_xx = invsym(quadcross(tmp_x, tmp_w, tmp_x))
		}
		// Stack this level's KxK inverse at its block offset
		offset = K * (i - 1)
		inv_xx[|offset + 1, 1 \ offset + K , . |] = tmp_inv_xx
	}
	return(inv_xx)
}
60
+
61
+
62
// For every level of factor f, regresses y on the slope variables in
// f.extra("x") (using the precomputed inverses from precompute_inv_xx)
// and returns the fitted values xb(+d), unsorted back to the original
// row order via f.invsort(). If -alphas- is passed (and nonempty), the
// per-level coefficients (intercept first, when present) are stored in it.
`Variables' panelsolve_invsym(`Variables' y,
                              `Factor' f,
                              `Boolean' has_intercept,
                              | `Matrix' alphas)
{
	`Integer' i, L, K, offset
	`Variables' x, tmp_x, tmp_y, xbd, tmp_xbd
	`Variable' w, tmp_w
	`Matrix' xmeans, inv_xx
	`RowVector' tmp_xmeans, tmp_ymeans
	`Matrix' tmp_xy, tmp_inv_xx
	`Boolean' has_weights
	`Boolean' save_alphas
	`Vector' b

	has_weights = asarray(f.extra, "has_weights")
	// Saving alphas only supported for a single outcome column
	save_alphas = args()>=4 & alphas!=J(0,0,.)
	// assert(has_weights==0 | has_weights==1)
	if (save_alphas) assert(cols(y)==1)

	// x, y and w must be already sorted by the factor f
	L = f.num_levels
	xbd = J(rows(y), cols(y), .)
	x = asarray(f.extra, "x")
	inv_xx = asarray(f.extra, "inv_xx")
	K = cols(x)

	if (has_weights) w = asarray(f.extra, "weights")
	if (has_intercept) xmeans = asarray(f.extra, "xmeans")

	for (i = 1; i <= L; i++) {
		tmp_y = panelsubmatrix(y, i, f.info)
		tmp_x = panelsubmatrix(x, i, f.info)
		tmp_w = has_weights ? panelsubmatrix(w, i, f.info) : 1
		// Pick out this level's KxK inverse from the stacked matrix
		offset = K * (i - 1)
		tmp_inv_xx = inv_xx[|offset + 1, 1 \ offset + K , . |]

		if (has_intercept) {
			// Demeaned regression: b = (X'X)^-1 X'y on deviations from means
			tmp_ymeans = mean(tmp_y, tmp_w)
			tmp_xmeans = K > 1 ? xmeans[i, .] : xmeans[i]
			tmp_xy = quadcrossdev(tmp_x, tmp_xmeans, tmp_w, tmp_y, tmp_ymeans)
			if (save_alphas) {
				b = tmp_inv_xx * tmp_xy
				// Store (intercept, slopes'): intercept = ybar - xbar*b
				alphas[i, .] = tmp_ymeans - tmp_xmeans * b, b'
				tmp_xbd = (tmp_x :- tmp_xmeans) * b :+ tmp_ymeans
			}
			else {
				tmp_xbd = (tmp_x :- tmp_xmeans) * (tmp_inv_xx * tmp_xy) :+ tmp_ymeans
			}
		}
		else {
			// No intercept: plain within-level least squares
			tmp_xy = quadcross(tmp_x, tmp_w, tmp_y)
			if (save_alphas) {
				b = tmp_inv_xx * tmp_xy
				alphas[i, .] = b'
				tmp_xbd = tmp_x * b
			}
			else {
				tmp_xbd = tmp_x * (tmp_inv_xx * tmp_xy)
			}
		}
		// Write fitted values back into this level's panel rows
		xbd[|f.info[i,1], 1 \ f.info[i,2], .|] = tmp_xbd
	}
	// Undo the factor sort so rows match the caller's original order
	return(f.invsort(xbd))
}
127
+
128
+ /*
129
+ `Variables' panelsolve_qrsolve(`Variables' Y, `Variables' X, `Factor' f)
130
+ {
131
+ `Integer' i
132
+ `Variables' x, y, betas
133
+
134
+ betas = J(f.num_levels, 1 + cols(X), .)
135
+
136
+ for (i = 1; i <= f.num_levels; i++) {
137
+ y = panelsubmatrix(Y, i, F.info)
138
+ x = panelsubmatrix(X, i, F.info) , J(rows(y), 1, 1)
139
+ betas[i, .] = qrsolve(x, y)'
140
+ }
141
+ return(betas)
142
+ }
143
+
144
+ */
145
+
146
// used with lsmr if we have fixed slopes
// Diagonal (Jacobi-style) preconditioner for LSMR: scales y by the inverse
// square root of its within-level (weighted) sum of squares, so each
// implicit column of the FE-slope design has unit norm.
`Variables' reghdfe_panel_precondition(`Variables' y, `Factor' f)
{
	`Vector' ans
	pointer(`Variable') Pw
	`Boolean' has_weights

	has_weights = asarray(f.extra, "has_weights")
	if (has_weights) {
		Pw = &asarray(f.extra, "weights")
		ans = `panelsum'(y:^2, *Pw, f.info)
	}
	else {
		// BUGFIX(review): sum the *squares* of y, matching the weighted
		// branch; the old code summed y itself, which only coincides with
		// the sum of squares when y is a 0/1 dummy -- yet this function is
		// used precisely when there are continuous fixed slopes
		ans = `panelsum'(y:^2, f.info)
	}

	// Divide each element of y by the norm of its level
	ans = y :/ sqrt(ans)[f.levels]
	return(ans)
}
165
+ end
166
+
30/replication_package/Adofiles/reghdfe_2019/reghdfe_store_alphas.ado ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
* After estimation, recovers the combined fixed-effect component
* d = depvar - xb - resid over e(sample) and asks the Mata HDFE object to
* decompose/store the individual fixed effects requested through absorb().
* No-op unless HDFE.save_any_fe indicates at least one FE should be saved.
program define reghdfe_store_alphas, eclass
	mata: st_local("save_any_fe", strofreal(HDFE.save_any_fe))
	assert inlist(`save_any_fe', 0, 1)
	if (`save_any_fe') {
		* Sanity checks: we need both the depvar and the stored residual
		_assert e(depvar) != "", msg("e(depvar) is empty")
		_assert e(resid) != "", msg("e(resid) is empty")
		// we can't use -confirm var- because it might have TS operators
		fvrevar `e(depvar)', list
		confirm numeric var `e(resid)', exact
		tempvar d
		* Compute xb (three cases: regressors / constant-only / empty model)
		if (e(rank)) {
			qui _predict double `d' if e(sample), xb
		}
		else if (e(report_constant)) {
			gen double `d' = _b[_cons] if e(sample)
		}
		else {
			gen double `d' = 0 if e(sample)
		}
		* Turn xb into d = y - xb - resid
		qui replace `d' = `e(depvar)' - `d' - `e(resid)' if e(sample)

		mata: HDFE.store_alphas("`d'")
		drop `d'

		// Drop resid if we don't want to save it; and update e(resid)
		* NOTE(review): when the residual lived under the internal temp name,
		* the drop succeeds (c(rc)==0) and e(resid) is then cleared
		cap drop __temp_reghdfe_resid__
		if (!c(rc)) ereturn local resid
	}
	end